import hashlib
import logging
import os
import re
import tempfile
import bb.data
from bb.checksum import FileChecksumCache

logger = logging.getLogger('BitBake.SigGen')

try:
    import cPickle as pickle
except ImportError:
    import pickle
    logger.info('Importing cPickle failed. Falling back to a very slow implementation.')

def init(d):
    siggens = [obj for obj in globals().itervalues()
               if type(obj) is type and issubclass(obj, SignatureGenerator)]

    desired = d.getVar("BB_SIGNATURE_HANDLER", True) or "noop"
    for sg in siggens:
        if desired == sg.name:
            return sg(d)

    logger.error("Invalid signature generator '%s', using default 'noop'\n"
                 "Available generators: %s", desired,
                 ', '.join(obj.name for obj in siggens))
    return SignatureGenerator(d)
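
# Illustrative use (not part of this module): a caller selects a generator by
# setting BB_SIGNATURE_HANDLER before calling init(), e.g.
#
#   d.setVar("BB_SIGNATURE_HANDLER", "basichash")
#   siggen = init(d)    # instance of SignatureGeneratorBasicHash below
#
# An unknown handler name logs the available generators and falls back to the
# no-op SignatureGenerator.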
class SignatureGenerator(object):
    """
    A dummy "noop" signature generator: computes no hashes and stores no
    signature data.
    """
    name = "noop"

    def __init__(self, data):
        self.taskhash = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}

    def finalise(self, fn, d, variant):
        return

    def get_taskhash(self, fn, task, deps, dataCache):
        return "0"

    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        return

    def stampfile(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def stampcleanmask(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def dump_sigtask(self, fn, task, stampbase, runtime):
        return

    def invalidate_task(self, task, d, fn):
        bb.build.del_stamp(task, d, fn)

    def dump_sigs(self, dataCache, options):
        return

    def get_taskdata(self):
        return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints)

    def set_taskdata(self, data):
        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints = data
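
# get_taskdata()/set_taskdata() above exist so hash state can be handed
# between processes. A minimal sketch (names hypothetical):
#
#   state = siggen.get_taskdata()   # (runtaskdeps, taskhash, checksums, taints)
#   # ...serialise and send to a worker process...
#   worker_siggen.set_taskdata(state)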
class SignatureGeneratorBasic(SignatureGenerator):
    """
    A signature generator which computes each task's hash from the variables
    the task depends upon.
    """
    name = "basic"

    def __init__(self, data):
        self.basehash = {}
        self.taskhash = {}
        self.taskdeps = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}
        self.gendeps = {}
        self.lookupcache = {}
        self.pkgnameextract = re.compile(r"(?P<fn>.*)\..*")
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
        self.taskwhitelist = None
        self.init_rundepcheck(data)
        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE", True)
        if checksum_cache_file:
            self.checksum_cache = FileChecksumCache()
            self.checksum_cache.init_cache(data, checksum_cache_file)
        else:
            self.checksum_cache = None

    def init_rundepcheck(self, data):
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
        if self.taskwhitelist:
            self.twl = re.compile(self.taskwhitelist)
        else:
            self.twl = None

    def _build_data(self, fn, d):
        tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)

        taskdeps = {}

        for task in tasklist:
            data = lookupcache[task]

            if data is None:
                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
                data = ''

            gendeps[task] -= self.basewhitelist
            newdeps = gendeps[task]
            seen = set()
            while newdeps:
                nextdeps = newdeps
                seen |= nextdeps
                newdeps = set()
                for dep in nextdeps:
                    if dep in self.basewhitelist:
                        continue
                    gendeps[dep] -= self.basewhitelist
                    newdeps |= gendeps[dep]
                newdeps -= seen

            alldeps = sorted(seen)
            for dep in alldeps:
                data = data + dep
                var = lookupcache[dep]
                if var is not None:
                    data = data + str(var)
            self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
            taskdeps[task] = alldeps

        self.taskdeps[fn] = taskdeps
        self.gendeps[fn] = gendeps
        self.lookupcache[fn] = lookupcache

        return taskdeps
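
    # In effect the base hash of each task is an md5 over the task's own value
    # followed by every variable it transitively depends on, in sorted order.
    # A rough sketch of the accumulation above (variable names hypothetical):
    #
    #   data = lookupcache["do_compile"]
    #   for dep in sorted(seen):                # e.g. ["CC", "CFLAGS"]
    #       data += dep
    #       if lookupcache[dep] is not None:
    #           data += str(lookupcache[dep])
    #   basehash = hashlib.md5(data).hexdigest()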
    def finalise(self, fn, d, variant):
        if variant:
            fn = "virtual:" + variant + ":" + fn

        try:
            taskdeps = self._build_data(fn, d)
        except:
            bb.warn("Error during finalise of %s" % fn)
            raise

        # Slow but can be useful for debugging mismatched basehashes
        #for task in self.taskdeps[fn]:
        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)

        for task in taskdeps:
            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache):
        # Return True if we should keep the dependency, False to drop it.
        # We only manipulate the dependencies for packages not in the whitelist
        if self.twl and not self.twl.search(recipename):
            # then process the actual dependencies
            if self.twl.search(depname):
                return False
        return True
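
    # Illustrative example of the whitelist logic above (pattern hypothetical):
    # with BB_HASHTASK_WHITELIST = ".*-native", a recipe that does not match
    # the pattern has its dependency on e.g. "foo-native" dropped from its
    # hash, while recipes matching the pattern keep all of their dependencies.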
    def read_taint(self, fn, task, stampbase):
        taint = None
        try:
            with open(stampbase + '.' + task + '.taint', 'r') as taintf:
                taint = taintf.read()
        except IOError:
            pass
        return taint

    def get_taskhash(self, fn, task, deps, dataCache):
        k = fn + "." + task
        data = dataCache.basetaskhash[k]
        self.runtaskdeps[k] = []
        self.file_checksum_values[k] = []
        recipename = dataCache.pkg_fn[fn]
        for dep in sorted(deps, key=clean_basepath):
            depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
            if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
                continue
            if dep not in self.taskhash:
                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
            data = data + self.taskhash[dep]
            self.runtaskdeps[k].append(dep)

        if task in dataCache.file_checksums[fn]:
            if self.checksum_cache:
                checksums = self.checksum_cache.get_checksums(dataCache.file_checksums[fn][task], recipename)
            else:
                checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
            for (f, cs) in checksums:
                self.file_checksum_values[k].append((f, cs))
                if cs:
                    data = data + cs

        taskdep = dataCache.task_deps[fn]
        if 'nostamp' in taskdep and task in taskdep['nostamp']:
            # Nostamp tasks need an implicit taint so that they force any dependent tasks to run
            import uuid
            taint = str(uuid.uuid4())
            data = data + taint
            self.taints[k] = "nostamp:" + taint

        taint = self.read_taint(fn, task, dataCache.stamp[fn])
        if taint:
            data = data + taint
            self.taints[k] = taint
            logger.warning("%s is tainted from a forced run" % k)

        h = hashlib.md5(data).hexdigest()
        self.taskhash[k] = h
        #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
        return h
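
    # Summarising the accumulation above, the task hash is roughly
    # (illustrative, not an exact transcript):
    #
    #   md5( basetaskhash
    #        + taskhash of each non-whitelisted runtime dep, in sorted order
    #        + checksum of each file the task depends on
    #        + any nostamp/forced-run taint )
    #
    # so a change to any input propagates into the hash of every dependent
    # task.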
    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        if self.checksum_cache:
            self.checksum_cache.save_extras()
            self.checksum_cache.save_merge()
        else:
            bb.fetch2.fetcher_parse_save()
            bb.fetch2.fetcher_parse_done()

    def dump_sigtask(self, fn, task, stampbase, runtime):
        k = fn + "." + task
        referencestamp = stampbase
        if isinstance(runtime, str) and runtime.startswith("customfile"):
            sigfile = stampbase
            referencestamp = runtime[11:]
        elif runtime and k in self.taskhash:
            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
        else:
            sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]

        bb.utils.mkdirhier(os.path.dirname(sigfile))

        data = {}
        data['task'] = task
        data['basewhitelist'] = self.basewhitelist
        data['taskwhitelist'] = self.taskwhitelist
        data['taskdeps'] = self.taskdeps[fn][task]
        data['basehash'] = self.basehash[k]
        data['gendeps'] = {}
        data['varvals'] = {}
        data['varvals'][task] = self.lookupcache[fn][task]
        for dep in self.taskdeps[fn][task]:
            if dep in self.basewhitelist:
                continue
            data['gendeps'][dep] = self.gendeps[fn][dep]
            data['varvals'][dep] = self.lookupcache[fn][dep]

        if runtime and k in self.taskhash:
            data['runtaskdeps'] = self.runtaskdeps[k]
            data['file_checksum_values'] = [(os.path.basename(f), cs) for f, cs in self.file_checksum_values[k]]
            data['runtaskhashes'] = {}
            for dep in data['runtaskdeps']:
                data['runtaskhashes'][dep] = self.taskhash[dep]
            data['taskhash'] = self.taskhash[k]

        taint = self.read_taint(fn, task, referencestamp)
        if taint:
            data['taint'] = taint

        if runtime and k in self.taints:
            if 'nostamp:' in self.taints[k]:
                data['taint'] = self.taints[k]

        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
        try:
            with os.fdopen(fd, "wb") as stream:
                pickle.dump(data, stream, -1)
                stream.flush()
            os.chmod(tmpfile, 0664)
            os.rename(tmpfile, sigfile)
        except (OSError, IOError) as err:
            try:
                os.unlink(tmpfile)
            except OSError:
                pass
            raise err

        computed_basehash = calc_basehash(data)
        if computed_basehash != self.basehash[k]:
            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
        if k in self.taskhash:
            computed_taskhash = calc_taskhash(data)
            if computed_taskhash != self.taskhash[k]:
                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
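
    # For a task with a computed hash, the pickled dictionary above ends up in
    # a file named along the lines of (path illustrative):
    #
    #   <stampbase>.do_compile.sigdata.<taskhash>
    #
    # or, when no task hash is available, ...sigbasedata.<basehash>. These
    # files are exactly what compare_sigfiles() and dump_sigfile() below
    # consume.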
    def dump_sigs(self, dataCache, options):
        for fn in self.taskdeps:
            for task in self.taskdeps[fn]:
                k = fn + "." + task
                if k not in self.taskhash:
                    continue
                if dataCache.basetaskhash[k] != self.basehash[k]:
                    bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
                    bb.error("The mismatched hashes were %s and %s" % (dataCache.basetaskhash[k], self.basehash[k]))
                self.dump_sigtask(fn, task, dataCache.stamp[fn], True)

class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
    name = "basichash"

    def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            k = fn + "." + taskname[:-9]
        else:
            k = fn + "." + taskname
        if clean:
            h = "*"
        elif k in self.taskhash:
            h = self.taskhash[k]
        else:
            # If k is not in basehash, then error
            h = self.basehash[k]
        return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
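
    # Unlike the base class, the stamp here embeds the task (or base) hash, so
    # a do_compile stamp might look like (hash invented for illustration):
    #
    #   <stampbase>.do_compile.3b5d3c7d207e37dceeedd301e35e2e58
    #
    # Passing clean=True substitutes "*" for the hash, yielding a glob that
    # matches the task's stamps for any hash; stampcleanmask() below relies on
    # exactly that.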
    def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
        return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)

    def invalidate_task(self, task, d, fn):
        bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
        bb.build.write_taint(task, d, fn)

def dump_this_task(outfile, d):
    import bb.parse
    fn = d.getVar("BB_FILENAME", True)
    task = "do_" + d.getVar("BB_CURRENTTASK", True)
    referencestamp = bb.build.stamp_internal(task, d, None, True)
    bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
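
# A task could dump its own signature from python code, e.g. (illustrative
# task definition; names and metadata syntax assumed):
#
#   python do_dumpsig () {
#       bb.siggen.dump_this_task(d.expand("${WORKDIR}/dumpsig.sigdata"), d)
#   }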
def clean_basepath(a):
    b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
    if a.startswith("virtual:"):
        b = b + ":" + a.rsplit(":", 1)[0]
    return b
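
# Worked example of clean_basepath() (path hypothetical): for
#
#   a = "virtual:native:/meta/recipes/foo/foo_1.0.bb.do_compile"
#
# rsplit("/", 2) gives [..., "foo", "foo_1.0.bb.do_compile"], so
# b = "foofoo_1.0.bb.do_compile", and the virtual prefix is re-appended to
# give "foofoo_1.0.bb.do_compile:virtual:native" -- a key that stays stable
# across different layer checkout locations.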
def clean_basepaths(a):
    b = {}
    for x in a:
        b[clean_basepath(x)] = a[x]
    return b

def clean_basepaths_list(a):
    b = []
    for x in a:
        b.append(clean_basepath(x))
    return b
def compare_sigfiles(a, b, recursecb=None):
    output = []

    p1 = pickle.Unpickler(open(a, "rb"))
    a_data = p1.load()
    p2 = pickle.Unpickler(open(b, "rb"))
    b_data = p2.load()

    def dict_diff(a, b, whitelist=set()):
        sa = set(a.keys())
        sb = set(b.keys())
        common = sa & sb
        changed = set()
        for i in common:
            if a[i] != b[i] and i not in whitelist:
                changed.add(i)
        added = sb - sa
        removed = sa - sb
        return changed, added, removed

    def file_checksums_diff(a, b):
        from collections import Counter

        # Handle the old siginfo format
        if isinstance(a, dict):
            a = [(os.path.basename(f), cs) for f, cs in a.items()]
        if isinstance(b, dict):
            b = [(os.path.basename(f), cs) for f, cs in b.items()]

        # Compare lists, ensuring we can handle duplicate filenames if they exist
        removedcount = Counter(a)
        removedcount.subtract(b)
        addedcount = Counter(b)
        addedcount.subtract(a)

        added = []
        for x in b:
            if addedcount[x] > 0:
                addedcount[x] -= 1
                added.append(x)

        removed = []
        changed = []
        for x in a:
            if removedcount[x] > 0:
                removedcount[x] -= 1
                for y in added:
                    if y[0] == x[0]:
                        changed.append((x[0], x[1], y[1]))
                        added.remove(y)
                        break
                else:
                    removed.append(x)

        added = [x[0] for x in added]
        removed = [x[0] for x in removed]
        return changed, added, removed

    if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
        output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
        if a_data['basewhitelist'] and b_data['basewhitelist']:
            output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))

    if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
        output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
        if a_data['taskwhitelist'] and b_data['taskwhitelist']:
            output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))

    if a_data['taskdeps'] != b_data['taskdeps']:
        output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))

    if a_data['basehash'] != b_data['basehash']:
        output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))

    changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
    if changed:
        for dep in changed:
            output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
            if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
                output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
    if added:
        for dep in added:
            output.append("Dependency on variable %s was added" % (dep))
    if removed:
        for dep in removed:
            output.append("Dependency on variable %s was removed" % (dep))

    changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
    if changed:
        for dep in changed:
            output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))

    changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
    if changed:
        for f, old, new in changed:
            output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
    if added:
        for f in added:
            output.append("Dependency on checksum of file %s was added" % (f))
    if removed:
        for f in removed:
            output.append("Dependency on checksum of file %s was removed" % (f))

    if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
        changed = ["Number of task dependencies changed"]
    else:
        changed = []
        for idx, task in enumerate(a_data['runtaskdeps']):
            a = a_data['runtaskdeps'][idx]
            b = b_data['runtaskdeps'][idx]
            if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b]:
                changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))

    if changed:
        output.append("runtaskdeps changed from %s to %s" % (clean_basepaths_list(a_data['runtaskdeps']), clean_basepaths_list(b_data['runtaskdeps'])))
        output.append("\n".join(changed))

    if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
        a = a_data['runtaskhashes']
        b = b_data['runtaskhashes']
        changed, added, removed = dict_diff(a, b)
        if added:
            for dep in added:
                bdep_found = False
                if removed:
                    for bdep in removed:
                        if b[dep] == a[bdep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
                            bdep_found = True
                if not bdep_found:
                    output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
        if removed:
            for dep in removed:
                adep_found = False
                if added:
                    for adep in added:
                        if b[adep] == a[dep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
                            adep_found = True
                if not adep_found:
                    output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
        if changed:
            for dep in changed:
                output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
                if callable(recursecb):
                    # If a dependent hash changed, might as well print the line above and then defer to the changes in
                    # that hash since in all likelihood, they're the same changes this task also saw.
                    recout = recursecb(dep, a[dep], b[dep])
                    if recout:
                        output = [output[-1]] + recout

    a_taint = a_data.get('taint', None)
    b_taint = b_data.get('taint', None)
    if a_taint != b_taint:
        output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))

    return output
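
# Typical use of compare_sigfiles() (sketch; the bitbake-diffsigs utility
# drives it along these lines):
#
#   for line in compare_sigfiles("old.sigdata", "new.sigdata"):
#       print(line)
#
# The optional recursecb callback lets the caller locate the sigdata files of
# a changed dependency and splice that recursive diff into the output.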
def calc_basehash(sigdata):
    task = sigdata['task']
    basedata = sigdata['varvals'][task]

    if basedata is None:
        basedata = ''

    alldeps = sigdata['taskdeps']
    for dep in alldeps:
        basedata = basedata + dep
        val = sigdata['varvals'][dep]
        if val is not None:
            basedata = basedata + str(val)

    return hashlib.md5(basedata).hexdigest()

def calc_taskhash(sigdata):
    data = sigdata['basehash']

    for dep in sigdata['runtaskdeps']:
        data = data + sigdata['runtaskhashes'][dep]

    for c in sigdata['file_checksum_values']:
        data = data + c[1]

    if 'taint' in sigdata:
        if 'nostamp:' in sigdata['taint']:
            data = data + sigdata['taint'][8:]
        else:
            data = data + sigdata['taint']

    return hashlib.md5(data).hexdigest()
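
# calc_basehash()/calc_taskhash() recompute the hashes from a dumped sigdata
# dictionary, so a stored signature can be verified, e.g. (illustrative):
#
#   sigdata = pickle.Unpickler(open(sigfile, "rb")).load()
#   assert calc_taskhash(sigdata) == sigdata['taskhash']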
def dump_sigfile(a):
    output = []

    p1 = pickle.Unpickler(open(a, "rb"))
    a_data = p1.load()

    output.append("basewhitelist: %s" % (a_data['basewhitelist']))
    output.append("taskwhitelist: %s" % (a_data['taskwhitelist']))
    output.append("Task dependencies: %s" % (sorted(a_data['taskdeps'])))
    output.append("basehash: %s" % (a_data['basehash']))

    for dep in a_data['gendeps']:
        output.append("List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep]))

    for dep in a_data['varvals']:
        output.append("Variable %s value is %s" % (dep, a_data['varvals'][dep]))

    if 'runtaskdeps' in a_data:
        output.append("Tasks this task depends on: %s" % (a_data['runtaskdeps']))

    if 'file_checksum_values' in a_data:
        output.append("This task depends on the checksums of files: %s" % (a_data['file_checksum_values']))

    if 'runtaskhashes' in a_data:
        for dep in a_data['runtaskhashes']:
            output.append("Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep]))

    if 'taint' in a_data:
        output.append("Tainted (by forced/invalidated task): %s" % a_data['taint'])

    if 'task' in a_data:
        computed_basehash = calc_basehash(a_data)
        output.append("Computed base hash is %s and from file %s" % (computed_basehash, a_data['basehash']))
    else:
        output.append("Unable to compute base hash")

    computed_taskhash = calc_taskhash(a_data)
    output.append("Computed task hash is %s" % computed_taskhash)

    return output