import hashlib
import logging
import os
import re
import tempfile
import pickle
import bb.data
from bb.checksum import FileChecksumCache

logger = logging.getLogger('BitBake.SigGen')

def init(d):
    siggens = [obj for obj in globals().values()
               if type(obj) is type and issubclass(obj, SignatureGenerator)]

    desired = d.getVar("BB_SIGNATURE_HANDLER") or "noop"
    for sg in siggens:
        if desired == sg.name:
            return sg(d)
    logger.error("Invalid signature generator '%s', using default 'noop'\n"
                 "Available generators: %s", desired,
                 ', '.join(obj.name for obj in siggens))
    return SignatureGenerator(d)
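
# Illustrative wiring, not part of this module: BitBake's cooker creates the
# generator once at startup, roughly
#
#     bb.parse.siggen = bb.siggen.init(d)
#
# and the rest of the codebase then reaches it through bb.parse.siggen
# (see dump_this_task() below).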

class SignatureGenerator(object):
    """
    Dummy "noop" signature generator and base class for the real
    generators below.
    """
    name = "noop"

    def __init__(self, data):
        self.basehash = {}
        self.taskhash = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}

    def finalise(self, fn, d, variant):
        return

    def get_taskhash(self, fn, task, deps, dataCache):
        return "0"

    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        return

    def stampfile(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def stampcleanmask(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def dump_sigtask(self, fn, task, stampbase, runtime):
        return

    def invalidate_task(self, task, d, fn):
        bb.build.del_stamp(task, d, fn)

    def dump_sigs(self, dataCache, options):
        return

    def get_taskdata(self):
        return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash)

    def set_taskdata(self, data):
        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data
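
# get_taskdata()/set_taskdata() exist so the mutable signature state can be
# shipped between processes as a single picklable tuple; a sketch of the
# intended round-trip (not the exact runqueue wiring):
#
#     state = bb.parse.siggen.get_taskdata()   # serialising side
#     bb.parse.siggen.set_taskdata(state)      # receiving side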

class SignatureGeneratorBasic(SignatureGenerator):
    """
    Basic signature generator: computes base and task hashes but, unlike
    "basichash" below, does not embed the task hash in stamp file names.
    """
    name = "basic"

    def __init__(self, data):
        self.basehash = {}
        self.taskhash = {}
        self.taskdeps = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}
        self.gendeps = {}
        self.lookupcache = {}
        self.pkgnameextract = re.compile(r"(?P<fn>.*)\..*")
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
        self.taskwhitelist = None
        self.init_rundepcheck(data)
        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE")
        if checksum_cache_file:
            self.checksum_cache = FileChecksumCache()
            self.checksum_cache.init_cache(data, checksum_cache_file)
        else:
            self.checksum_cache = None

    def init_rundepcheck(self, data):
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
        if self.taskwhitelist:
            self.twl = re.compile(self.taskwhitelist)
        else:
            self.twl = None

    def _build_data(self, fn, d):
        tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)

        taskdeps = {}

        for task in tasklist:
            data = lookupcache[task]

            if data is None:
                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
                data = ''

            gendeps[task] -= self.basewhitelist
            newdeps = gendeps[task]
            seen = set()
            while newdeps:
                nextdeps = newdeps
                seen |= nextdeps
                newdeps = set()
                for dep in nextdeps:
                    if dep in self.basewhitelist:
                        continue
                    gendeps[dep] -= self.basewhitelist
                    newdeps |= gendeps[dep]
                newdeps -= seen

            alldeps = sorted(seen)
            for dep in alldeps:
                data = data + dep
                var = lookupcache[dep]
                if var is not None:
                    data = data + str(var)
            datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
            k = fn + "." + task
            if k in self.basehash and self.basehash[k] != datahash:
                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
            self.basehash[k] = datahash

            taskdeps[task] = alldeps

        self.taskdeps[fn] = taskdeps
        self.gendeps[fn] = gendeps
        self.lookupcache[fn] = lookupcache

        return taskdeps
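
    # The while-loop in _build_data() computes the transitive closure of each
    # task's variable dependencies. A minimal standalone sketch of the same
    # algorithm over an invented dependency dict:
    #
    #     gendeps = {"do_compile": {"CC"}, "CC": {"TARGET_SYS"}, "TARGET_SYS": set()}
    #     newdeps, seen = set(gendeps["do_compile"]), set()
    #     while newdeps:
    #         seen |= newdeps
    #         newdeps = set().union(*(gendeps[d] for d in newdeps)) - seen
    #     assert seen == {"CC", "TARGET_SYS"}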

    def finalise(self, fn, d, variant):
        mc = d.getVar("__BBMULTICONFIG", False) or ""
        if variant or mc:
            fn = bb.cache.realfn2virtual(fn, variant, mc)

        try:
            taskdeps = self._build_data(fn, d)
        except:
            bb.warn("Error during finalise of %s" % fn)
            raise

        #Slow but can be useful for debugging mismatched basehashes
        #for task in self.taskdeps[fn]:
        #    self.dump_sigtask(fn, task, d.getVar("STAMP"), False)

        for task in taskdeps:
            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
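
    # After finalise() runs, recipe metadata can look up its own base hashes;
    # for example d.getVar("BB_BASEHASH_task-do_compile") returns the value
    # stored above for do_compile.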

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache):
        # Return True if we should keep the dependency, False to drop it.
        # We only manipulate the dependencies for packages not in the whitelist
        if self.twl and not self.twl.search(recipename):
            # then process the actual dependencies
            if self.twl.search(depname):
                return False
        return True

    def read_taint(self, fn, task, stampbase):
        taint = None
        try:
            with open(stampbase + '.' + task + '.taint', 'r') as taintf:
                taint = taintf.read()
        except IOError:
            pass
        return taint

    def get_taskhash(self, fn, task, deps, dataCache):
        k = fn + "." + task
        data = dataCache.basetaskhash[k]
        self.basehash[k] = data
        self.runtaskdeps[k] = []
        self.file_checksum_values[k] = []
        recipename = dataCache.pkg_fn[fn]
        for dep in sorted(deps, key=clean_basepath):
            depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
            if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
                continue
            if dep not in self.taskhash:
                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
            data = data + self.taskhash[dep]
            self.runtaskdeps[k].append(dep)

        if task in dataCache.file_checksums[fn]:
            if self.checksum_cache:
                checksums = self.checksum_cache.get_checksums(dataCache.file_checksums[fn][task], recipename)
            else:
                checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
            for (f, cs) in checksums:
                self.file_checksum_values[k].append((f, cs))
                if cs:
                    data = data + cs

        taskdep = dataCache.task_deps[fn]
        if 'nostamp' in taskdep and task in taskdep['nostamp']:
            # Nostamp tasks need an implicit taint so that they force any dependent tasks to run
            import uuid
            taint = str(uuid.uuid4())
            data = data + taint
            self.taints[k] = "nostamp:" + taint
        taint = self.read_taint(fn, task, dataCache.stamp[fn])
        if taint:
            data = data + taint
            self.taints[k] = taint
            logger.warning("%s is tainted from a forced run" % k)

        h = hashlib.md5(data.encode("utf-8")).hexdigest()
        self.taskhash[k] = h
        #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
        return h

    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        if self.checksum_cache:
            self.checksum_cache.save_extras()
            self.checksum_cache.save_merge()
        else:
            bb.fetch2.fetcher_parse_save()
            bb.fetch2.fetcher_parse_done()

    def dump_sigtask(self, fn, task, stampbase, runtime):
        k = fn + "." + task
        referencestamp = stampbase
        if isinstance(runtime, str) and runtime.startswith("customfile"):
            sigfile = stampbase
            referencestamp = runtime[11:]
        elif runtime and k in self.taskhash:
            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
        else:
            sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]

        bb.utils.mkdirhier(os.path.dirname(sigfile))

        data = {}
        data['task'] = task
        data['basewhitelist'] = self.basewhitelist
        data['taskwhitelist'] = self.taskwhitelist
        data['taskdeps'] = self.taskdeps[fn][task]
        data['basehash'] = self.basehash[k]
        data['gendeps'] = {}
        data['varvals'] = {}
        data['varvals'][task] = self.lookupcache[fn][task]
        for dep in self.taskdeps[fn][task]:
            if dep in self.basewhitelist:
                continue
            data['gendeps'][dep] = self.gendeps[fn][dep]
            data['varvals'][dep] = self.lookupcache[fn][dep]

        if runtime and k in self.taskhash:
            data['runtaskdeps'] = self.runtaskdeps[k]
            data['file_checksum_values'] = [(os.path.basename(f), cs) for f, cs in self.file_checksum_values[k]]
            data['runtaskhashes'] = {}
            for dep in data['runtaskdeps']:
                data['runtaskhashes'][dep] = self.taskhash[dep]
            data['taskhash'] = self.taskhash[k]

        taint = self.read_taint(fn, task, referencestamp)
        if taint:
            data['taint'] = taint

        if runtime and k in self.taints:
            if 'nostamp:' in self.taints[k]:
                data['taint'] = self.taints[k]

        computed_basehash = calc_basehash(data)
        if computed_basehash != self.basehash[k]:
            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
        if runtime and k in self.taskhash:
            computed_taskhash = calc_taskhash(data)
            if computed_taskhash != self.taskhash[k]:
                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
                sigfile = sigfile.replace(self.taskhash[k], computed_taskhash)

        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
        try:
            with os.fdopen(fd, "wb") as stream:
                pickle.dump(data, stream, -1)
                stream.flush()
            os.chmod(tmpfile, 0o664)
            os.rename(tmpfile, sigfile)
        except (OSError, IOError) as err:
            try:
                os.unlink(tmpfile)
            except OSError:
                pass
            raise err
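
    # Design note: writing to a mkstemp() file in the target directory and
    # then os.rename()ing it over the final name makes the update atomic on
    # POSIX filesystems; readers see either the old sigfile or the complete
    # new one, never a partially written pickle.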

    def dump_sigs(self, dataCaches, options):
        for fn in self.taskdeps:
            for task in self.taskdeps[fn]:
                tid = fn + ":" + task
                (mc, _, _) = bb.runqueue.split_tid(tid)
                k = fn + "." + task
                if k not in self.taskhash:
                    continue
                if dataCaches[mc].basetaskhash[k] != self.basehash[k]:
                    bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
                    bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[k], self.basehash[k]))
                self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True)

class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
    name = "basichash"

    def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            k = fn + "." + taskname[:-9]
        else:
            k = fn + "." + taskname
        if clean:
            h = "*"
        elif k in self.taskhash:
            h = self.taskhash[k]
        else:
            # If k is missing from basehash too, the resulting KeyError is deliberate
            h = self.basehash[k]
        return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')

    def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
        return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)

    def invalidate_task(self, task, d, fn):
        bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
        bb.build.write_taint(task, d, fn)
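
# With "basichash", stamp file names embed the task hash, so a changed hash
# automatically invalidates the old stamp. Illustrative call (path and hash
# invented):
#
#     siggen.stampfile("/tmp/stamps/busybox-1.24.1-r0", fn, "do_compile", "")
#     # -> "/tmp/stamps/busybox-1.24.1-r0.do_compile.5f7d0a9c..."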

def dump_this_task(outfile, d):
    import bb.parse
    fn = d.getVar("BB_FILENAME")
    task = "do_" + d.getVar("BB_CURRENTTASK")
    referencestamp = bb.build.stamp_internal(task, d, None, True)
    bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)

def clean_basepath(a):
    b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
    if a.startswith("virtual:"):
        b = b + ":" + a.rsplit(":", 1)[0]
    return b

def clean_basepaths(a):
    b = {}
    for x in a:
        b[clean_basepath(x)] = a[x]
    return b

def clean_basepaths_list(a):
    b = []
    for x in a:
        b.append(clean_basepath(x))
    return b
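
# Tracing clean_basepath() on an invented input: for
# "/home/user/meta/busybox/busybox_1.24.1.bb.do_compile" the last two path
# components are concatenated into "busyboxbusybox_1.24.1.bb.do_compile",
# and for "virtual:native:..." inputs the virtual prefix is re-appended
# after a colon.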

def compare_sigfiles(a, b, recursecb=None):
    output = []

    with open(a, 'rb') as f:
        p1 = pickle.Unpickler(f)
        a_data = p1.load()
    with open(b, 'rb') as f:
        p2 = pickle.Unpickler(f)
        b_data = p2.load()

    def dict_diff(a, b, whitelist=set()):
        sa = set(a.keys())
        sb = set(b.keys())
        common = sa & sb
        changed = set()
        for i in common:
            if a[i] != b[i] and i not in whitelist:
                changed.add(i)
        added = sb - sa
        removed = sa - sb
        return changed, added, removed

    def file_checksums_diff(a, b):
        from collections import Counter
        # Handle old siginfo format
        if isinstance(a, dict):
            a = [(os.path.basename(f), cs) for f, cs in a.items()]
        if isinstance(b, dict):
            b = [(os.path.basename(f), cs) for f, cs in b.items()]
        # Compare lists, ensuring we can handle duplicate filenames if they exist
        removedcount = Counter(a)
        removedcount.subtract(b)
        addedcount = Counter(b)
        addedcount.subtract(a)
        added = []
        for x in b:
            if addedcount[x] > 0:
                addedcount[x] -= 1
                added.append(x)
        removed = []
        changed = []
        for x in a:
            if removedcount[x] > 0:
                removedcount[x] -= 1
                for y in added:
                    if y[0] == x[0]:
                        changed.append((x[0], x[1], y[1]))
                        added.remove(y)
                        break
                else:
                    removed.append(x)
        added = [x[0] for x in added]
        removed = [x[0] for x in removed]
        return changed, added, removed

    if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
        output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
        if a_data['basewhitelist'] and b_data['basewhitelist']:
            output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))

    if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
        output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
        if a_data['taskwhitelist'] and b_data['taskwhitelist']:
            output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))

    if a_data['taskdeps'] != b_data['taskdeps']:
        output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))

    if a_data['basehash'] != b_data['basehash']:
        output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))

    changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
    if changed:
        for dep in changed:
            output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
            if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
                output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
    if added:
        for dep in added:
            output.append("Dependency on variable %s was added" % (dep))
    if removed:
        for dep in removed:
            output.append("Dependency on variable %s was removed" % (dep))

    changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
    if changed:
        for dep in changed:
            output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))

    if not 'file_checksum_values' in a_data:
        a_data['file_checksum_values'] = {}
    if not 'file_checksum_values' in b_data:
        b_data['file_checksum_values'] = {}

    changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
    if changed:
        for f, old, new in changed:
            output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
    if added:
        for f in added:
            output.append("Dependency on checksum of file %s was added" % (f))
    if removed:
        for f in removed:
            output.append("Dependency on checksum of file %s was removed" % (f))

    if not 'runtaskdeps' in a_data:
        a_data['runtaskdeps'] = {}
    if not 'runtaskdeps' in b_data:
        b_data['runtaskdeps'] = {}

    if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
        changed = ["Number of task dependencies changed"]
    else:
        changed = []
        for idx, task in enumerate(a_data['runtaskdeps']):
            a = a_data['runtaskdeps'][idx]
            b = b_data['runtaskdeps'][idx]
            if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b]:
                changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))

    if changed:
        output.append("runtaskdeps changed from %s to %s" % (clean_basepaths_list(a_data['runtaskdeps']), clean_basepaths_list(b_data['runtaskdeps'])))
        output.append("\n".join(changed))

    if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
        a = a_data['runtaskhashes']
        b = b_data['runtaskhashes']
        changed, added, removed = dict_diff(a, b)
        if added:
            for dep in added:
                bdep_found = False
                if removed:
                    for bdep in removed:
                        if b[dep] == a[bdep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
                            bdep_found = True
                if not bdep_found:
                    output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
        if removed:
            for dep in removed:
                adep_found = False
                if added:
                    for adep in added:
                        if b[adep] == a[dep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
                            adep_found = True
                if not adep_found:
                    output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
        if changed:
            for dep in changed:
                output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
                if callable(recursecb):
                    # If a dependent hash changed, might as well print the line above and then defer to the changes in
                    # that hash since in all likelihood, they're the same changes this task also saw.
                    recout = recursecb(dep, a[dep], b[dep])
                    if recout:
                        output = [output[-1]] + recout

    a_taint = a_data.get('taint', None)
    b_taint = b_data.get('taint', None)
    if a_taint != b_taint:
        output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))

    return output
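
# Typical use, in the style of the bitbake-diffsigs tool (file names
# invented): diff two sigdata files and print the change list:
#
#     for line in compare_sigfiles("do_compile.sigdata.old", "do_compile.sigdata.new"):
#         print(line)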

def calc_basehash(sigdata):
    task = sigdata['task']
    basedata = sigdata['varvals'][task]

    if basedata is None:
        basedata = ''

    alldeps = sigdata['taskdeps']
    for dep in alldeps:
        basedata = basedata + dep
        val = sigdata['varvals'][dep]
        if val is not None:
            basedata = basedata + str(val)

    return hashlib.md5(basedata.encode("utf-8")).hexdigest()

def calc_taskhash(sigdata):
    data = sigdata['basehash']

    for dep in sigdata['runtaskdeps']:
        data = data + sigdata['runtaskhashes'][dep]

    for c in sigdata['file_checksum_values']:
        data = data + c[1]

    if 'taint' in sigdata:
        if 'nostamp:' in sigdata['taint']:
            data = data + sigdata['taint'][8:]
        else:
            data = data + sigdata['taint']

    return hashlib.md5(data.encode("utf-8")).hexdigest()
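
# A minimal worked example (all values invented) showing how calc_taskhash()
# recomputes a task hash: it is the md5 of the concatenated basehash,
# dependency task hashes and file checksums, in that order:
#
#     sigdata = {
#         'basehash': 'aa' * 16,
#         'runtaskdeps': ['dep.bb.do_fetch'],
#         'runtaskhashes': {'dep.bb.do_fetch': 'bb' * 16},
#         'file_checksum_values': [('local.patch', 'cc' * 16)],
#     }
#     assert calc_taskhash(sigdata) == hashlib.md5(
#         ('aa' * 16 + 'bb' * 16 + 'cc' * 16).encode("utf-8")).hexdigest()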

def dump_sigfile(a):
    output = []

    with open(a, 'rb') as f:
        p1 = pickle.Unpickler(f)
        a_data = p1.load()

    output.append("basewhitelist: %s" % (a_data['basewhitelist']))
    output.append("taskwhitelist: %s" % (a_data['taskwhitelist']))
    output.append("Task dependencies: %s" % (sorted(a_data['taskdeps'])))
    output.append("basehash: %s" % (a_data['basehash']))

    for dep in a_data['gendeps']:
        output.append("List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep]))

    for dep in a_data['varvals']:
        output.append("Variable %s value is %s" % (dep, a_data['varvals'][dep]))

    if 'runtaskdeps' in a_data:
        output.append("Tasks this task depends on: %s" % (a_data['runtaskdeps']))

    if 'file_checksum_values' in a_data:
        output.append("This task depends on the checksums of files: %s" % (a_data['file_checksum_values']))

    if 'runtaskhashes' in a_data:
        for dep in a_data['runtaskhashes']:
            output.append("Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep]))

    if 'taint' in a_data:
        output.append("Tainted (by forced/invalidated task): %s" % a_data['taint'])

    if 'task' in a_data:
        computed_basehash = calc_basehash(a_data)
        output.append("Computed base hash is %s and from file %s" % (computed_basehash, a_data['basehash']))
    else:
        output.append("Unable to compute base hash")

    computed_taskhash = calc_taskhash(a_data)
    output.append("Computed task hash is %s" % computed_taskhash)

    return output
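
# Companion to compare_sigfiles(): render a single sigdata/siginfo file as
# text (file name invented):
#
#     for line in dump_sigfile("do_compile.sigdata.1234"):
#         print(line)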