# siggen.py
import hashlib
import logging
import os
import re
import tempfile
import pickle
import bb.data
from bb.checksum import FileChecksumCache

logger = logging.getLogger('BitBake.SigGen')
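
# init() picks the signature generator: it scans this module's globals for
# SignatureGenerator subclasses and instantiates the one whose 'name' matches
# the BB_SIGNATURE_HANDLER variable, falling back to the no-op generator.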
def init(d):
    siggens = [obj for obj in globals().values()
               if type(obj) is type and issubclass(obj, SignatureGenerator)]

    desired = d.getVar("BB_SIGNATURE_HANDLER", True) or "noop"
    for sg in siggens:
        if desired == sg.name:
            return sg(d)

    logger.error("Invalid signature generator '%s', using default 'noop'\n"
                 "Available generators: %s", desired,
                 ', '.join(obj.name for obj in siggens))
    return SignatureGenerator(d)
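
# The base class defines the interface every signature generator implements.
# It deliberately does as little as possible: hashes are constant, stamp
# files carry no hash component and dumping signatures is a no-op.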
class SignatureGenerator(object):
    """
    No-op signature generator: the default fallback handler.
    """
    name = "noop"

    def __init__(self, data):
        self.taskhash = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}

    def finalise(self, fn, d, variant):
        return

    def get_taskhash(self, fn, task, deps, dataCache):
        return "0"

    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        return

    def stampfile(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def stampcleanmask(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def dump_sigtask(self, fn, task, stampbase, runtime):
        return

    def invalidate_task(self, task, d, fn):
        bb.build.del_stamp(task, d, fn)

    def dump_sigs(self, dataCache, options):
        return

    def get_taskdata(self):
        return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints)

    def set_taskdata(self, data):
        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints = data
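
# The "basic" generator computes real hashes. _build_data() derives a per-task
# "base hash" from the values of every variable the task depends on (minus the
# BB_HASHBASE_WHITELIST), and get_taskhash() then mixes in runtime state such
# as dependent task hashes and file checksums.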
class SignatureGeneratorBasic(SignatureGenerator):
    """
    Signature generator that hashes a task's variable dependencies.
    """
    name = "basic"

    def __init__(self, data):
        self.basehash = {}
        self.taskhash = {}
        self.taskdeps = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}
        self.gendeps = {}
        self.lookupcache = {}
        self.pkgnameextract = re.compile(r"(?P<fn>.*)\..*")
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
        self.taskwhitelist = None
        self.init_rundepcheck(data)
        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE", True)
        if checksum_cache_file:
            self.checksum_cache = FileChecksumCache()
            self.checksum_cache.init_cache(data, checksum_cache_file)
        else:
            self.checksum_cache = None

    def init_rundepcheck(self, data):
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
        if self.taskwhitelist:
            self.twl = re.compile(self.taskwhitelist)
        else:
            self.twl = None

    def _build_data(self, fn, d):
        tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)

        taskdeps = {}
        basehash = {}

        for task in tasklist:
            data = lookupcache[task]

            if data is None:
                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
                data = ''
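
            # Expand the task's variable dependencies into a transitive
            # closure: keep pulling in the dependencies of anything newly
            # seen, skipping whitelisted variables, until the worklist is
            # empty.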
            gendeps[task] -= self.basewhitelist
            newdeps = gendeps[task]
            seen = set()
            while newdeps:
                nextdeps = newdeps
                seen |= nextdeps
                newdeps = set()
                for dep in nextdeps:
                    if dep in self.basewhitelist:
                        continue
                    gendeps[dep] -= self.basewhitelist
                    newdeps |= gendeps[dep]
                newdeps -= seen

            alldeps = sorted(seen)
            for dep in alldeps:
                data = data + dep
                var = lookupcache[dep]
                if var is not None:
                    data = data + str(var)
            self.basehash[fn + "." + task] = hashlib.md5(data.encode("utf-8")).hexdigest()
            taskdeps[task] = alldeps

        self.taskdeps[fn] = taskdeps
        self.gendeps[fn] = gendeps
        self.lookupcache[fn] = lookupcache

        return taskdeps

    def finalise(self, fn, d, variant):

        mc = d.getVar("__BBMULTICONFIG", False) or ""
        if variant or mc:
            fn = bb.cache.realfn2virtual(fn, variant, mc)

        try:
            taskdeps = self._build_data(fn, d)
        except:
            bb.warn("Error during finalise of %s" % fn)
            raise

        # Slow but can be useful for debugging mismatched basehashes
        #for task in self.taskdeps[fn]:
        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)

        for task in taskdeps:
            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache):
        # Return True if we should keep the dependency, False to drop it.
        # We only manipulate the dependencies for packages not in the whitelist
        if self.twl and not self.twl.search(recipename):
            # then process the actual dependencies
            if self.twl.search(depname):
                return False
        return True

    def read_taint(self, fn, task, stampbase):
        taint = None
        try:
            with open(stampbase + '.' + task + '.taint', 'r') as taintf:
                taint = taintf.read()
        except IOError:
            pass
        return taint
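
    # The task hash is built up, in order, from: the task's base hash, the
    # hashes of its runtime dependencies (which must already be computed),
    # the checksums of any files the task depends on, and any taint from a
    # nostamp or forced run. The md5 of that concatenation is the task hash.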
    def get_taskhash(self, fn, task, deps, dataCache):
        k = fn + "." + task
        data = dataCache.basetaskhash[k]
        self.runtaskdeps[k] = []
        self.file_checksum_values[k] = []
        recipename = dataCache.pkg_fn[fn]
        for dep in sorted(deps, key=clean_basepath):
            depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
            if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
                continue
            if dep not in self.taskhash:
                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
            data = data + self.taskhash[dep]
            self.runtaskdeps[k].append(dep)

        if task in dataCache.file_checksums[fn]:
            if self.checksum_cache:
                checksums = self.checksum_cache.get_checksums(dataCache.file_checksums[fn][task], recipename)
            else:
                checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
            for (f, cs) in checksums:
                self.file_checksum_values[k].append((f, cs))
                if cs:
                    data = data + cs

        taskdep = dataCache.task_deps[fn]
        if 'nostamp' in taskdep and task in taskdep['nostamp']:
            # Nostamp tasks need an implicit taint so that they force any dependent tasks to run
            import uuid
            taint = str(uuid.uuid4())
            data = data + taint
            self.taints[k] = "nostamp:" + taint

        taint = self.read_taint(fn, task, dataCache.stamp[fn])
        if taint:
            data = data + taint
            self.taints[k] = taint
            logger.warning("%s is tainted from a forced run" % k)

        h = hashlib.md5(data.encode("utf-8")).hexdigest()
        self.taskhash[k] = h
        #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
        return h

    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        if self.checksum_cache:
            self.checksum_cache.save_extras()
            self.checksum_cache.save_merge()
        else:
            bb.fetch2.fetcher_parse_save()
            bb.fetch2.fetcher_parse_done()
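
    # dump_sigtask() serialises everything that went into a task's hash into
    # a pickled sigdata (or sigbasedata) file next to the stamp. The write
    # goes through a temporary file plus rename so readers never observe a
    # partially written file, and the stored data is re-hashed afterwards as
    # a consistency check.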
    def dump_sigtask(self, fn, task, stampbase, runtime):
        k = fn + "." + task
        referencestamp = stampbase
        if isinstance(runtime, str) and runtime.startswith("customfile"):
            sigfile = stampbase
            referencestamp = runtime[11:]
        elif runtime and k in self.taskhash:
            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
        else:
            sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]

        bb.utils.mkdirhier(os.path.dirname(sigfile))

        data = {}
        data['task'] = task
        data['basewhitelist'] = self.basewhitelist
        data['taskwhitelist'] = self.taskwhitelist
        data['taskdeps'] = self.taskdeps[fn][task]
        data['basehash'] = self.basehash[k]
        data['gendeps'] = {}
        data['varvals'] = {}
        data['varvals'][task] = self.lookupcache[fn][task]
        for dep in self.taskdeps[fn][task]:
            if dep in self.basewhitelist:
                continue
            data['gendeps'][dep] = self.gendeps[fn][dep]
            data['varvals'][dep] = self.lookupcache[fn][dep]

        if runtime and k in self.taskhash:
            data['runtaskdeps'] = self.runtaskdeps[k]
            data['file_checksum_values'] = [(os.path.basename(f), cs) for f, cs in self.file_checksum_values[k]]
            data['runtaskhashes'] = {}
            for dep in data['runtaskdeps']:
                data['runtaskhashes'][dep] = self.taskhash[dep]
            data['taskhash'] = self.taskhash[k]

        taint = self.read_taint(fn, task, referencestamp)
        if taint:
            data['taint'] = taint

        if runtime and k in self.taints:
            if 'nostamp:' in self.taints[k]:
                data['taint'] = self.taints[k]

        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
        try:
            with os.fdopen(fd, "wb") as stream:
                pickle.dump(data, stream, -1)
                stream.flush()
            os.chmod(tmpfile, 0o664)
            os.rename(tmpfile, sigfile)
        except (OSError, IOError) as err:
            try:
                os.unlink(tmpfile)
            except OSError:
                pass
            raise err

        computed_basehash = calc_basehash(data)
        if computed_basehash != self.basehash[k]:
            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
        if runtime and k in self.taskhash:
            computed_taskhash = calc_taskhash(data)
            if computed_taskhash != self.taskhash[k]:
                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))

    def dump_sigs(self, dataCaches, options):
        for fn in self.taskdeps:
            for task in self.taskdeps[fn]:
                tid = fn + ":" + task
                (mc, _, _) = bb.runqueue.split_tid(tid)
                k = fn + "." + task
                if k not in self.taskhash:
                    continue
                if dataCaches[mc].basetaskhash[k] != self.basehash[k]:
                    bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
                    bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[k], self.basehash[k]))
                self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True)

class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
    name = "basichash"

    def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            k = fn + "." + taskname[:-9]
        else:
            k = fn + "." + taskname
        if clean:
            h = "*"
        elif k in self.taskhash:
            h = self.taskhash[k]
        else:
            # If k is not in self.basehash either, the KeyError is a real error
            h = self.basehash[k]
        return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')

    def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
        return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)

    def invalidate_task(self, task, d, fn):
        bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
        bb.build.write_taint(task, d, fn)
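
# dump_this_task() can be called from a running task's context to write that
# task's signature data to an arbitrary output file, using the "customfile:"
# runtime convention handled in dump_sigtask() above.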
def dump_this_task(outfile, d):
    import bb.parse
    fn = d.getVar("BB_FILENAME", True)
    task = "do_" + d.getVar("BB_CURRENTTASK", True)
    referencestamp = bb.build.stamp_internal(task, d, None, True)
    bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
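
# clean_basepath() shortens a task identifier for display by keeping only the
# last directory component and the filename (concatenated directly), with any
# "virtual:" prefix re-appended as a suffix. For example, a hypothetical
# "virtual:native:/meta/recipes/foo/foo_1.0.bb.do_compile" becomes
# "foofoo_1.0.bb.do_compile:virtual:native".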
def clean_basepath(a):
    b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
    if a.startswith("virtual:"):
        b = b + ":" + a.rsplit(":", 1)[0]
    return b

def clean_basepaths(a):
    b = {}
    for x in a:
        b[clean_basepath(x)] = a[x]
    return b

def clean_basepaths_list(a):
    b = []
    for x in a:
        b.append(clean_basepath(x))
    return b
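
# compare_sigfiles() loads two pickled signature files and returns a list of
# human-readable lines describing each difference that could explain a hash
# mismatch: whitelists, variable values and dependencies, file checksums,
# dependent task hashes and taints. If recursecb is callable it is invoked
# for each changed dependent task so callers can recurse into its signatures.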
def compare_sigfiles(a, b, recursecb=None):
    output = []

    with open(a, 'rb') as f:
        p1 = pickle.Unpickler(f)
        a_data = p1.load()
    with open(b, 'rb') as f:
        p2 = pickle.Unpickler(f)
        b_data = p2.load()

    def dict_diff(a, b, whitelist=set()):
        sa = set(a.keys())
        sb = set(b.keys())
        common = sa & sb
        changed = set()
        for i in common:
            if a[i] != b[i] and i not in whitelist:
                changed.add(i)
        added = sb - sa
        removed = sa - sb
        return changed, added, removed
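
    # file_checksums_diff() compares two (filename, checksum) lists as
    # multisets, using collections.Counter so duplicate filenames are handled
    # correctly. Entries present in both lists but with different checksums
    # are reported as changed rather than as an add/remove pair.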
    def file_checksums_diff(a, b):
        from collections import Counter
        # Handle old siginfo format
        if isinstance(a, dict):
            a = [(os.path.basename(f), cs) for f, cs in a.items()]
        if isinstance(b, dict):
            b = [(os.path.basename(f), cs) for f, cs in b.items()]

        # Compare lists, ensuring we can handle duplicate filenames if they exist
        removedcount = Counter(a)
        removedcount.subtract(b)
        addedcount = Counter(b)
        addedcount.subtract(a)
        added = []
        for x in b:
            if addedcount[x] > 0:
                addedcount[x] -= 1
                added.append(x)
        removed = []
        changed = []
        for x in a:
            if removedcount[x] > 0:
                removedcount[x] -= 1
                for y in added:
                    if y[0] == x[0]:
                        changed.append((x[0], x[1], y[1]))
                        added.remove(y)
                        break
                else:
                    removed.append(x)
        added = [x[0] for x in added]
        removed = [x[0] for x in removed]
        return changed, added, removed

    if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
        output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
        if a_data['basewhitelist'] and b_data['basewhitelist']:
            output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))

    if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
        output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
        if a_data['taskwhitelist'] and b_data['taskwhitelist']:
            output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))

    if a_data['taskdeps'] != b_data['taskdeps']:
        output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))

    if a_data['basehash'] != b_data['basehash']:
        output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))

    changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
    if changed:
        for dep in changed:
            output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
            if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
                output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
    if added:
        for dep in added:
            output.append("Dependency on variable %s was added" % (dep))
    if removed:
        for dep in removed:
            output.append("Dependency on variable %s was removed" % (dep))

    changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
    if changed:
        for dep in changed:
            output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))

    if 'file_checksum_values' not in a_data:
        a_data['file_checksum_values'] = {}
    if 'file_checksum_values' not in b_data:
        b_data['file_checksum_values'] = {}

    changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
    if changed:
        for f, old, new in changed:
            output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
    if added:
        for f in added:
            output.append("Dependency on checksum of file %s was added" % (f))
    if removed:
        for f in removed:
            output.append("Dependency on checksum of file %s was removed" % (f))

    if 'runtaskdeps' not in a_data:
        a_data['runtaskdeps'] = {}
    if 'runtaskdeps' not in b_data:
        b_data['runtaskdeps'] = {}

    if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
        changed = ["Number of task dependencies changed"]
    else:
        changed = []
        for idx, task in enumerate(a_data['runtaskdeps']):
            a = a_data['runtaskdeps'][idx]
            b = b_data['runtaskdeps'][idx]
            if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b]:
                changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))

    if changed:
        output.append("runtaskdeps changed from %s to %s" % (clean_basepaths_list(a_data['runtaskdeps']), clean_basepaths_list(b_data['runtaskdeps'])))
        output.append("\n".join(changed))

    if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
        a = a_data['runtaskhashes']
        b = b_data['runtaskhashes']
        changed, added, removed = dict_diff(a, b)
        if added:
            for dep in added:
                bdep_found = False
                if removed:
                    for bdep in removed:
                        if b[dep] == a[bdep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
                            bdep_found = True
                if not bdep_found:
                    output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
        if removed:
            for dep in removed:
                adep_found = False
                if added:
                    for adep in added:
                        if b[adep] == a[dep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
                            adep_found = True
                if not adep_found:
                    output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
        if changed:
            for dep in changed:
                output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
                if callable(recursecb):
                    # If a dependent hash changed, might as well print the line above and then defer to the changes in
                    # that hash since in all likelihood, they're the same changes this task also saw.
                    recout = recursecb(dep, a[dep], b[dep])
                    if recout:
                        output = [output[-1]] + recout

    a_taint = a_data.get('taint', None)
    b_taint = b_data.get('taint', None)
    if a_taint != b_taint:
        output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))

    return output
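
# calc_basehash() and calc_taskhash() recompute the two hashes purely from a
# loaded signature dictionary, mirroring the logic in SignatureGeneratorBasic,
# so a sigdata file can be verified independently of live generator state.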
def calc_basehash(sigdata):
    task = sigdata['task']
    basedata = sigdata['varvals'][task]

    if basedata is None:
        basedata = ''

    alldeps = sigdata['taskdeps']
    for dep in alldeps:
        basedata = basedata + dep
        val = sigdata['varvals'][dep]
        if val is not None:
            basedata = basedata + str(val)

    return hashlib.md5(basedata.encode("utf-8")).hexdigest()

def calc_taskhash(sigdata):
    data = sigdata['basehash']

    for dep in sigdata['runtaskdeps']:
        data = data + sigdata['runtaskhashes'][dep]

    for c in sigdata['file_checksum_values']:
        if c[1]:
            # Skip entries without a checksum, matching get_taskhash()
            data = data + c[1]

    if 'taint' in sigdata:
        if 'nostamp:' in sigdata['taint']:
            data = data + sigdata['taint'][8:]
        else:
            data = data + sigdata['taint']

    return hashlib.md5(data.encode("utf-8")).hexdigest()

def dump_sigfile(a):
    output = []

    with open(a, 'rb') as f:
        p1 = pickle.Unpickler(f)
        a_data = p1.load()

    output.append("basewhitelist: %s" % (a_data['basewhitelist']))
    output.append("taskwhitelist: %s" % (a_data['taskwhitelist']))
    output.append("Task dependencies: %s" % (sorted(a_data['taskdeps'])))
    output.append("basehash: %s" % (a_data['basehash']))

    for dep in a_data['gendeps']:
        output.append("List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep]))

    for dep in a_data['varvals']:
        output.append("Variable %s value is %s" % (dep, a_data['varvals'][dep]))

    if 'runtaskdeps' in a_data:
        output.append("Tasks this task depends on: %s" % (a_data['runtaskdeps']))

    if 'file_checksum_values' in a_data:
        output.append("This task depends on the checksums of files: %s" % (a_data['file_checksum_values']))

    if 'runtaskhashes' in a_data:
        for dep in a_data['runtaskhashes']:
            output.append("Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep]))

    if 'taint' in a_data:
        output.append("Tainted (by forced/invalidated task): %s" % a_data['taint'])

    if 'task' in a_data:
        computed_basehash = calc_basehash(a_data)
        output.append("Computed base hash is %s and from file %s" % (computed_basehash, a_data['basehash']))
    else:
        output.append("Unable to compute base hash")

    computed_taskhash = calc_taskhash(a_data)
    output.append("Computed task hash is %s" % computed_taskhash)

    return output
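
# Example usage (hypothetical file names), along the lines of what the
# bitbake-diffsigs utility does with the functions above:
#
#   for line in compare_sigfiles("foo.do_compile.sigdata.oldhash",
#                                "foo.do_compile.sigdata.newhash"):
#       print(line)
#
#   for line in dump_sigfile("foo.do_compile.sigdata.newhash"):
#       print(line)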