runqueue.py

#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import copy
import os
import sys
import signal
import stat
import fcntl
import errno
import logging
import re
import bb
from bb import msg, data, event
from bb import monitordisk
import subprocess

try:
    import cPickle as pickle
except ImportError:
    import pickle

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self, number = 1):
        self.active = self.active - number
        self.completed = self.completed + number

    def taskSkipped(self, number = 1):
        self.active = self.active + number
        self.skipped = self.skipped + number

    def taskActive(self):
        self.active = self.active + 1

# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
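
# Schedulers
#
# Each scheduler below builds a prio_map ordering over the runqueue task ids.
# The execution code selects a scheduler by its "name" attribute via the
# BB_SCHEDULER variable (defaulting to "speed", see RunQueueExecute below).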
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runq_fnid)

        self.prio_map = []
        self.prio_map.extend(range(self.numTasks))

        self.buildable = []
        self.stamps = {}
        for taskid in xrange(self.numTasks):
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[taskid]]
            taskname = self.rqdata.runq_task[taskid]
            self.stamps[taskid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            if self.rq.runq_buildable[taskid] == 1:
                self.buildable.append(taskid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        self.buildable = [x for x in self.buildable if not self.rq.runq_running[x] == 1]
        if not self.buildable:
            return None
        if len(self.buildable) == 1:
            taskid = self.buildable[0]
            stamp = self.stamps[taskid]
            if stamp not in self.rq.build_stamps.itervalues():
                return taskid

        if not self.rev_prio_map:
            self.rev_prio_map = range(self.numTasks)
            for taskid in xrange(self.numTasks):
                self.rev_prio_map[self.prio_map[taskid]] = taskid

        best = None
        bestprio = None
        for taskid in self.buildable:
            prio = self.rev_prio_map[taskid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[taskid]
                if stamp in self.rq.build_stamps.itervalues():
                    continue
                bestprio = prio
                best = taskid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuilable(self, task):
        self.buildable.append(task)
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        sortweight = sorted(copy.deepcopy(self.rqdata.runq_weight))
        copyweight = copy.deepcopy(self.rqdata.runq_weight)
        self.prio_map = []

        for weight in sortweight:
            idx = copyweight.index(weight)
            self.prio_map.append(idx)
            copyweight[idx] = -1

        self.prio_map.reverse()

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible. This works
    well where disk space is at a premium and classes like OE's rm_work are in
    force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)

        #FIXME - whilst this groups all fnids together it does not reorder the
        #fnid groups optimally.

        basemap = copy.deepcopy(self.prio_map)
        self.prio_map = []
        while (len(basemap) > 0):
            entry = basemap.pop(0)
            self.prio_map.append(entry)
            fnid = self.rqdata.runq_fnid[entry]
            todel = []
            for entry in basemap:
                entry_fnid = self.rqdata.runq_fnid[entry]
                if entry_fnid == fnid:
                    todel.append(basemap.index(entry))
                    self.prio_map.append(entry)
            todel.reverse()
            for idx in todel:
                del basemap[idx]
class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCache, taskData, targets):
        self.cooker = cooker
        self.dataCache = dataCache
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST", True) or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()

        self.reset()

    def reset(self):
        self.runq_fnid = []
        self.runq_task = []
        self.runq_depends = []
        self.runq_revdeps = []
        self.runq_hash = []

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in self.runq_depends[ids]:
            nam = os.path.basename(self.get_user_idstring(id))
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_task_name(self, task):
        return self.runq_task[task]

    def get_task_file(self, task):
        return self.taskData.fn_index[self.runq_fnid[task]]

    def get_task_hash(self, task):
        return self.runq_hash[task]

    def get_user_idstring(self, task, task_name_suffix = ""):
        fn = self.taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task] + task_name_suffix
        return "%s, %s" % (fn, taskname)

    def get_short_user_idstring(self, task, task_name_suffix = ""):
        fn = self.taskData.fn_index[self.runq_fnid[task]]
        pn = self.dataCache.pkg_fn[fn]
        taskname = self.runq_task[task] + task_name_suffix
        return "%s:%s" % (pn, taskname)

    def get_task_id(self, fnid, taskname):
        for listid in xrange(len(self.runq_fnid)):
            if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname:
                return listid
        return None

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in xrange(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in xrange(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (%s) (dependent Tasks %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends_names(dep)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.
        """
        numTasks = len(self.runq_fnid)
        weight = []
        deps_left = []
        task_done = []

        for listid in xrange(numTasks):
            task_done.append(False)
            weight.append(1)
            deps_left.append(len(self.runq_revdeps[listid]))

        for listid in endpoints:
            weight[listid] = 10
            task_done[listid] = True

        while True:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    weight[revdep] = weight[revdep] + weight[listid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for task in xrange(numTasks):
            if task_done[task] is False or deps_left[task] != 0:
                problem_tasks.append(task)
                logger.debug(2, "Task %s (%s) is not buildable", task, self.get_user_idstring(task))
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[task], deps_left[task])

        if problem_tasks:
            message = "Unbuildable tasks were found.\n"
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        runq_build = []
        recursivetasks = {}
        recursiveitasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        if len(taskData.tasks_name) == 0:
            # Nothing to do
            return 0

        logger.info("Preparing RunQueue")

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        def add_build_dependencies(depids, tasknames, depends):
            for depid in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depid not in taskData.build_targets:
                    continue
                depdata = taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depdata, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        def add_runtime_dependencies(depids, tasknames, depends):
            for depid in depids:
                if depid not in taskData.run_targets:
                    continue
                depdata = taskData.run_targets[depid][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depdata, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        def add_resolved_dependencies(depids, tasknames, depends):
            for depid in depids:
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depid, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        for task in xrange(len(taskData.tasks_name)):
            depends = set()
            fnid = taskData.tasks_fnid[task]
            fn = taskData.fn_index[fnid]
            task_deps = self.dataCache.task_deps[fn]

            #logger.debug(2, "Processing %s:%s", fn, taskData.tasks_name[task])

            if fnid not in taskData.failed_fnids:

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                depends = set(taskData.tasks_tdepends[task])

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                    add_build_dependencies(taskData.depids[fnid], tasknames, depends)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskData.tasks_name[task]].split()
                    add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData.tasks_idepends[task]
                for (depid, idependtask) in idepends:
                    if depid in taskData.build_targets and not depid in taskData.failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
                            if taskid is None:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
                            depends.add(taskid)
                irdepends = taskData.tasks_irdepends[task]
                for (depid, idependtask) in irdepends:
                    if depid in taskData.run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        depdata = taskData.run_targets[depid][0]
                        if depdata is not None:
                            taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
                            if taskid is None:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
                            depends.add(taskid)

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskData.tasks_name[task]].split()
                    recursivetasks[task] = tasknames
                    add_build_dependencies(taskData.depids[fnid], tasknames, depends)
                    add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)
                    if taskData.tasks_name[task] in tasknames:
                        recursivetasksselfref.add(task)

                if 'recideptask' in task_deps and taskData.tasks_name[task] in task_deps['recideptask']:
                    recursiveitasks[task] = []
                    for t in task_deps['recideptask'][taskData.tasks_name[task]].split():
                        newdep = taskData.gettask_id_fromfnid(fnid, t)
                        recursiveitasks[task].append(newdep)

            self.runq_fnid.append(taskData.tasks_fnid[task])
            self.runq_task.append(taskData.tasks_name[task])
            self.runq_depends.append(depends)
            self.runq_revdeps.append(set())
            self.runq_hash.append("")

            runq_build.append(0)

        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of self.runq_depends to be complete before this is processed
        extradeps = {}
        for task in recursivetasks:
            extradeps[task] = set(self.runq_depends[task])
            tasknames = recursivetasks[task]
            seendeps = set()
            seenfnid = []

            def generate_recdeps(t):
                newdeps = set()
                add_resolved_dependencies([taskData.tasks_fnid[t]], tasknames, newdeps)
                extradeps[task].update(newdeps)
                seendeps.add(t)
                newdeps.add(t)
                for i in newdeps:
                    for n in self.runq_depends[i]:
                        if n not in seendeps:
                            generate_recdeps(n)
            generate_recdeps(task)

            if task in recursiveitasks:
                for dep in recursiveitasks[task]:
                    generate_recdeps(dep)

        # Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for task in recursivetasks:
            extradeps[task].difference_update(recursivetasksselfref)

        for task in xrange(len(taskData.tasks_name)):
            # Add in extra dependencies
            if task in extradeps:
                self.runq_depends[task] = extradeps[task]
            # Remove all self references
            if task in self.runq_depends[task]:
                logger.debug(2, "Task %s (%s %s) contains self reference! %s", task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], self.runq_depends[task])
                self.runq_depends[task].remove(task)
        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(listid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if runq_build[listid] == 1:
                return

            runq_build[listid] = 1

            depends = self.runq_depends[listid]
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_pairs = []
        for target in self.targets:
            targetid = taskData.getbuild_id(target[0])

            if targetid not in taskData.build_targets:
                continue

            if targetid in taskData.failed_deps:
                continue

            fnid = taskData.build_targets[targetid][0]
            fn = taskData.fn_index[fnid]
            task = target[1]
            parents = False
            if task.endswith('-'):
                parents = True
                task = task[:-1]

            self.target_pairs.append((fn, task))

            if fnid in taskData.failed_fnids:
                continue

            if task not in taskData.tasks_lookup[fnid]:
                import difflib
                close_matches = difflib.get_close_matches(task, taskData.tasks_lookup[fnid], cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n %s" % "\n ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s%s" % (task, target[0], extra))

            # For tasks called "XXXX-", only run their dependencies
            listid = taskData.tasks_lookup[fnid][task]
            if parents:
                for i in self.runq_depends[listid]:
                    mark_active(i, 1)
            else:
                mark_active(listid, 1)

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        maps = []
        delcount = 0
        for listid in xrange(len(self.runq_fnid)):
            if runq_build[listid-delcount] == 1:
                maps.append(listid-delcount)
            else:
                del self.runq_fnid[listid-delcount]
                del self.runq_task[listid-delcount]
                del self.runq_depends[listid-delcount]
                del runq_build[listid-delcount]
                del self.runq_revdeps[listid-delcount]
                del self.runq_hash[listid-delcount]
                delcount = delcount + 1
                maps.append(-1)

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runq_fnid) == 0:
            if not taskData.abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runq_fnid))

        # Remap the dependencies to account for the deleted tasks
        # Check we didn't delete a task we depend on
        for listid in xrange(len(self.runq_fnid)):
            newdeps = []
            origdeps = self.runq_depends[listid]
            for origdep in origdeps:
                if maps[origdep] == -1:
                    bb.msg.fatal("RunQueue", "Invalid mapping - Should never happen!")
                newdeps.append(maps[origdep])
            self.runq_depends[listid] = set(newdeps)

        logger.verbose("Assign Weightings")

        # Generate a list of reverse dependencies to ease future calculations
        for listid in xrange(len(self.runq_fnid)):
            for dep in self.runq_depends[listid]:
                self.runq_revdeps[dep].add(listid)

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for listid in xrange(len(self.runq_fnid)):
            revdeps = self.runq_revdeps[listid]
            if len(revdeps) == 0:
                endpoints.append(listid)
            for dep in revdeps:
                if dep in self.runq_depends[listid]:
                    #self.dump_data(taskData)
                    bb.msg.fatal("RunQueue", "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))

        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        # Calculate task weights
        # Check for higher length circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)
        # Sanity Check - Check for multiple tasks building the same provider
        prov_list = {}
        seen_fn = []
        for task in xrange(len(self.runq_fnid)):
            fn = taskData.fn_index[self.runq_fnid[task]]
            if fn in seen_fn:
                continue
            seen_fn.append(fn)
            for prov in self.dataCache.fn_provides[fn]:
                if prov not in prov_list:
                    prov_list[prov] = [fn]
                elif fn not in prov_list[prov]:
                    prov_list[prov].append(fn)
        for prov in prov_list:
            if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCache.pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
                #
                # Construct a list of things which uniquely depend on each provider
                # since this may help the user figure out which dependency is triggering this warning
                #
                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
                deplist = {}
                commondeps = None
                for provfn in prov_list[prov]:
                    deps = set()
                    for task, fnid in enumerate(self.runq_fnid):
                        fn = taskData.fn_index[fnid]
                        if fn != provfn:
                            continue
                        for dep in self.runq_revdeps[task]:
                            fn = taskData.fn_index[self.runq_fnid[dep]]
                            if fn == provfn:
                                continue
                            deps.add(self.get_short_user_idstring(dep))
                    if not commondeps:
                        commondeps = set(deps)
                    else:
                        commondeps &= deps
                    deplist[provfn] = deps
                for provfn in deplist:
                    msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
                #
                # Construct a list of provides and runtime providers for each recipe
                # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                #
                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
                provide_results = {}
                rprovide_results = {}
                commonprovs = None
                commonrprovs = None
                for provfn in prov_list[prov]:
                    provides = set(self.dataCache.fn_provides[provfn])
                    rprovides = set()
                    for rprovide in self.dataCache.rproviders:
                        if provfn in self.dataCache.rproviders[rprovide]:
                            rprovides.add(rprovide)
                    for package in self.dataCache.packages:
                        if provfn in self.dataCache.packages[package]:
                            rprovides.add(package)
                    for package in self.dataCache.packages_dynamic:
                        if provfn in self.dataCache.packages_dynamic[package]:
                            rprovides.add(package)
                    if not commonprovs:
                        commonprovs = set(provides)
                    else:
                        commonprovs &= provides
                    provide_results[provfn] = provides
                    if not commonrprovs:
                        commonrprovs = set(rprovides)
                    else:
                        commonrprovs &= rprovides
                    rprovide_results[provfn] = rprovides
                #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
                #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
                for provfn in prov_list[prov]:
                    msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
                    msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))

                if self.warn_multi_bb:
                    logger.warning(msg)
                else:
                    logger.error(msg)

        # Create a whitelist usable by the stamp checks
        stampfnwhitelist = []
        for entry in self.stampwhitelist.split():
            entryid = self.taskData.getbuild_id(entry)
            if entryid not in self.taskData.build_targets:
                continue
            fnid = self.taskData.build_targets[entryid][0]
            fn = self.taskData.fn_index[fnid]
            stampfnwhitelist.append(fn)
        self.stampfnwhitelist = stampfnwhitelist

        # Iterate over the task list looking for tasks with a 'setscene' function
        self.runq_setscene = []
        if not self.cooker.configuration.nosetscene:
            for task in range(len(self.runq_fnid)):
                setscene = taskData.gettask_id(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task] + "_setscene", False)
                if not setscene:
                    continue
                self.runq_setscene.append(task)

        def invalidate_task(fn, taskname, error_nostamp):
            taskdep = self.dataCache.task_deps[fn]
            fnid = self.taskData.getfn_id(fn)
            if taskname not in taskData.tasks_lookup[fnid]:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn)

        # Invalidate task if force mode active
        if self.cooker.configuration.force:
            for (fn, target) in self.target_pairs:
                invalidate_task(fn, target, False)

        # Invalidate task if invalidate mode active
        if self.cooker.configuration.invalidate_stamp:
            for (fn, target) in self.target_pairs:
                for st in self.cooker.configuration.invalidate_stamp.split(','):
                    if not st.startswith("do_"):
                        st = "do_%s" % st
                    invalidate_task(fn, st, True)

        # Create and print to the logs a virtual/xxxx -> PN (fn) table
        virtmap = taskData.get_providermap(prefix="virtual/")
        virtpnmap = {}
        for v in virtmap:
            virtpnmap[v] = self.dataCache.pkg_fn[virtmap[v]]
            bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
        if hasattr(bb.parse.siggen, "tasks_resolved"):
            bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCache)

        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(range(len(self.runq_fnid)))
        while len(todeal) > 0:
            for task in todeal.copy():
                if len(self.runq_depends[task] - dealtwith) == 0:
                    dealtwith.add(task)
                    todeal.remove(task)
                    procdep = []
                    for dep in self.runq_depends[task]:
                        procdep.append(self.taskData.fn_index[self.runq_fnid[dep]] + "." + self.runq_task[dep])
                    self.runq_hash[task] = bb.parse.siggen.get_taskhash(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task], procdep, self.dataCache)

        bb.parse.siggen.writeout_file_checksum_cache()
        return len(self.runq_fnid)
    def dump_data(self, taskQueue):
        """
        Dump some debug information on the internal data structures
        """
        logger.debug(3, "run_tasks:")
        for task in xrange(len(self.rqdata.runq_task)):
            logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
                         taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                         self.rqdata.runq_task[task],
                         self.rqdata.runq_weight[task],
                         self.rqdata.runq_depends[task],
                         self.rqdata.runq_revdeps[task])

        logger.debug(3, "sorted_tasks:")
        for task1 in xrange(len(self.rqdata.runq_task)):
            if task1 in self.prio_map:
                task = self.prio_map[task1]
                logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
                             taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                             self.rqdata.runq_task[task],
                             self.rqdata.runq_weight[task],
                             self.rqdata.runq_depends[task],
                             self.rqdata.runq_revdeps[task])
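
# RunQueue ties the pieces together: it owns the RunQueueData, steps through the
# runQueue* state machine in _execute_runqueue() and talks to the external
# bitbake-worker processes over their stdin/stdout pipes.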
class RunQueue:
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):

        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)

        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION", True) or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID", True) or None

        self.state = runQueuePrepare

        # For disk space monitor
        self.dm = monitordisk.diskMonitor(cfgData)

        self.rqexe = None
        self.worker = None
        self.workerpipe = None
        self.fakeworker = None
        self.fakeworkerpipe = None

    def _start_worker(self, fakeroot = False, rqexec = None):
        logger.debug(1, "Starting bitbake-worker")
        magic = "decafbad"
        if self.cooker.configuration.profile:
            magic = "decafbadbad"
        if fakeroot:
            magic = magic + "beef"
            fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
            fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
            env = os.environ.copy()
            for key, value in (var.split('=') for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
        else:
            worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)

        workerdata = {
            "taskdeps" : self.rqdata.dataCache.task_deps,
            "fakerootenv" : self.rqdata.dataCache.fakerootenv,
            "fakerootdirs" : self.rqdata.dataCache.fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCache.fakerootnoenv,
            "sigdata" : bb.parse.siggen.get_taskdata(),
            "runq_hash" : self.rqdata.runq_hash,
            "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
            "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
            "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
            "prhost" : self.cooker.prhost,
            "buildname" : self.cfgData.getVar("BUILDNAME", True),
            "date" : self.cfgData.getVar("DATE", True),
            "time" : self.cfgData.getVar("TIME", True),
        }

        worker.stdin.write("<cookerconfig>" + pickle.dumps(self.cooker.configuration) + "</cookerconfig>")
        worker.stdin.write("<workerdata>" + pickle.dumps(workerdata) + "</workerdata>")
        worker.stdin.flush()

        return worker, workerpipe

    def _teardown_worker(self, worker, workerpipe):
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        try:
            worker.stdin.write("<quit></quit>")
            worker.stdin.flush()
        except IOError:
            pass
        while worker.returncode is None:
            workerpipe.read()
            worker.poll()
        while workerpipe.read():
            continue
        workerpipe.close()

    def start_worker(self):
        if self.worker:
            self.teardown_workers()
        self.teardown = False
        self.worker, self.workerpipe = self._start_worker()

    def start_fakeworker(self, rqexec):
        if not self.fakeworker:
            self.fakeworker, self.fakeworkerpipe = self._start_worker(True, rqexec)

    def teardown_workers(self):
        self.teardown = True
        self._teardown_worker(self.worker, self.workerpipe)
        self.worker = None
        self.workerpipe = None
        self._teardown_worker(self.fakeworker, self.fakeworkerpipe)
        self.fakeworker = None
        self.fakeworkerpipe = None

    def read_workers(self):
        self.workerpipe.read()
        if self.fakeworkerpipe:
            self.fakeworkerpipe.read()

    def active_fds(self):
        fds = []
        if self.workerpipe:
            fds.append(self.workerpipe.input)
        if self.fakeworkerpipe:
            fds.append(self.fakeworkerpipe.input)
        return fds
    def check_stamp_task(self, task, taskname = None, recurse = False, cache = None):
        def get_timestamp(f):
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist

        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        if taskname is None:
            taskname = self.rqdata.runq_task[task]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runq_depends[task]:
            if iscurrent:
                fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
                taskname2 = self.rqdata.runq_task[dep]
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                    if t1 < t2:
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                    if recurse and iscurrent:
                        if dep in cache:
                            iscurrent = cache[dep]
                            if not iscurrent:
                                logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                        else:
                            iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                            cache[dep] = iscurrent
        if recurse:
            cache[task] = iscurrent
        return iscurrent
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)
        """

        retval = True

        if self.state is runQueuePrepare:
            self.rqexe = RunQueueExecuteDummy(self)
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit

                # we are ready to run, see if any UI client needs the dependency info
                if bb.cooker.CookerFeatures.SEND_DEPENDS_TREE in self.cooker.featureset:
                    depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
                    bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

        if self.state is runQueueSceneInit:
            dump = self.cooker.configuration.dump_signatures
            if dump:
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete
            else:
                self.start_worker()
                self.rqexe = RunQueueExecuteScenequeue(self)

        if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp]:
            self.dm.check(self)

        if self.state is runQueueSceneRun:
            retval = self.rqexe.execute()

        if self.state is runQueueRunInit:
            if self.cooker.configuration.setsceneonly:
                self.state = runQueueComplete
            else:
                logger.info("Executing RunQueue Tasks")
                self.rqexe = RunQueueExecuteTasks(self)
                self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        if (self.state is runQueueComplete or self.state is runQueueFailed) and self.rqexe:
            self.teardown_workers()
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            if not self.rqdata.taskData.tryaltconfigs:
                raise bb.runqueue.TaskFailure(self.rqexe.failed_fnids)
            for fnid in self.rqexe.failed_fnids:
                self.rqdata.taskData.fail_fnid(fnid)
            self.rqdata.reset()

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval

    def execute_runqueue(self):
        # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
        try:
            return self._execute_runqueue()
        except bb.runqueue.TaskFailure:
            raise
        except SystemExit:
            raise
        except bb.BBHandledException:
            try:
                self.teardown_workers()
            except:
                pass
            self.state = runQueueComplete
            raise
        except:
            logger.error("An uncaught exception occurred in runqueue, please see the failure below:")
            try:
                self.teardown_workers()
            except:
                pass
            self.state = runQueueComplete
            raise

    def finish_runqueue(self, now = False):
        if not self.rqexe:
            self.state = runQueueComplete
            return

        if now:
            self.rqexe.finish_now()
        else:
            self.rqexe.finish()
    def dump_signatures(self, options):
        done = set()
        bb.note("Reparsing files to collect dependency data")
        for task in range(len(self.rqdata.runq_fnid)):
            if self.rqdata.runq_fnid[task] not in done:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data)
                done.add(self.rqdata.runq_fnid[task])

        bb.parse.siggen.dump_sigs(self.rqdata.dataCache, options)

        return

    def print_diffscenetasks(self):

        valid = []
        sq_hash = []
        sq_hashfn = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        stamppresent = []
        valid_new = set()

        for task in xrange(len(self.rqdata.runq_fnid)):
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]
            taskdep = self.rqdata.dataCache.task_deps[fn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(task)
                continue

            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
            sq_hash.append(self.rqdata.runq_hash[task])
            sq_taskname.append(taskname)
            sq_task.append(task)
        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
        try:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
            valid = bb.utils.better_eval(call, locs)
        # Handle version with no siginfo parameter
        except TypeError:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            valid = bb.utils.better_eval(call, locs)
        for v in valid:
            valid_new.add(sq_task[v])

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for task in noexec:
            if task not in self.rqdata.runq_setscene:
                continue
            for dep in self.rqdata.runq_depends[task]:
                hasnoexecparents = True
                for dep2 in self.rqdata.runq_revdeps[dep]:
                    if dep2 in self.rqdata.runq_setscene and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        invalidtasks = set()
        for task in xrange(len(self.rqdata.runq_fnid)):
            if task not in valid_new and task not in noexec:
                invalidtasks.add(task)

        found = set()
        processed = set()
        for task in invalidtasks:
            toprocess = set([task])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runq_depends[t]:
                        if dep in invalidtasks:
                            found.add(task)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                if task in found:
                    toprocess = set()

        tasklist = []
        for task in invalidtasks.difference(found):
            tasklist.append(self.rqdata.get_user_idstring(task))

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)

    def write_diffscenetasks(self, invalidtasks):

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list(' ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout

        for task in invalidtasks:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[task]
            h = self.rqdata.runq_hash[task]
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            matches = {k : v for k, v in matches.iteritems() if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_md5__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
  1163. class RunQueueExecute:
  1164. def __init__(self, rq):
  1165. self.rq = rq
  1166. self.cooker = rq.cooker
  1167. self.cfgData = rq.cfgData
  1168. self.rqdata = rq.rqdata
  1169. self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
  1170. self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"
  1171. self.runq_buildable = []
  1172. self.runq_running = []
  1173. self.runq_complete = []
  1174. self.build_stamps = {}
  1175. self.build_stamps2 = []
  1176. self.failed_fnids = []
  1177. self.stampcache = {}
  1178. rq.workerpipe.setrunqueueexec(self)
  1179. if rq.fakeworkerpipe:
  1180. rq.fakeworkerpipe.setrunqueueexec(self)
  1181. if self.number_tasks <= 0:
  1182. bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
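
    # Illustrative sketch (configuration, not code in this module): the two
    # variables read in __init__ above normally come from a conf file such as
    # local.conf, for example:
    #
    #   BB_NUMBER_THREADS = "4"
    #   BB_SCHEDULER = "speed"
    #
    # The value "4" is only an example; "speed" is simply the default already
    # applied above when BB_SCHEDULER is unset.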

    def runqueue_process_waitpid(self, task, status):
        # self.build_stamps[task] may not exist when using a shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        for worker in [self.rq.worker, self.rq.fakeworker]:
            if not worker:
                continue
            try:
                worker.stdin.write("<finishnow></finishnow>")
                worker.stdin.flush()
            except IOError:
                # worker must have died?
                pass

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True

        self.rq.state = runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene = False):
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            if setscene:
                depid = self.rqdata.runq_setscene[dep]
            else:
                depid = dep
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[depid]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[depid]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data }
        valid = bb.utils.better_eval(call, locs)
        return valid
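
    # Illustrative sketch (not part of this module): the callback named by
    # self.rq.depvalidate (typically wired up via BB_SETSCENE_DEPVALID) receives
    # the task, the taskdata mapping built above ({dep: [pn, taskname, fn]}),
    # the set of not-needed setscene tasks and the datastore. Its return value
    # is what check_dependencies() hands back, and the scenequeue execute()
    # below only treats a setscene task as skippable when that value is true, e.g.:
    #
    #   def example_depvalidate(task, taskdata, notneeded, d):
    #       # Hypothetical example: always return False, so execute() never
    #       # skips a setscene task on this basis.
    #       return False
    #
    # "example_depvalidate" is an invented name used purely for illustration.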


class RunQueueExecuteDummy(RunQueueExecute):
    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        self.rq.state = runQueueComplete
        return


class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runq_fnid))

        self.stampcache = {}

        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.rqdata.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)
            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(task)

        found = True
        while found:
            found = False
            for task in xrange(self.stats.total):
                if task in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))
                if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
                    found = True
                    self.rq.scenequeue_covered.add(task)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            for task in xrange(len(self.rqdata.runq_task)):
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                taskname = self.rqdata.runq_task[task]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(task, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                if self.rq.check_stamp_task(task, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                invalidtasks.append(task)

            call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d, invalidtasks=invalidtasks)"
            call2 = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.expanded_data, "invalidtasks" : invalidtasks }
            # Backwards compatibility with older versions without invalidtasks
            try:
                covered_remove = bb.utils.better_eval(call, locs)
            except TypeError:
                covered_remove = bb.utils.better_eval(call2, locs)

        def removecoveredtask(task):
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task] + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
            self.rq.scenequeue_covered.remove(task)

        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runq_depends[task]:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)

        event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)

        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

    def get_schedulers(self):
        schedulers = set(obj for obj in globals().values()
                             if type(obj) is type and
                                issubclass(obj, RunQueueScheduler))

        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue

                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))
        return schedulers
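
    # Illustrative sketch (not part of this module): a user-supplied scheduler is
    # any importable RunQueueScheduler subclass whose dotted name appears in
    # BB_SCHEDULERS and whose .name matches BB_SCHEDULER. Assuming a hypothetical
    # module called "myschedulers":
    #
    #   from bb.runqueue import RunQueueScheduler
    #
    #   class RunQueueSchedulerReverse(RunQueueScheduler):
    #       """Run tasks in the reverse of the basic priority order."""
    #       name = "reverse"
    #       def __init__(self, runqueue, rqdata):
    #           RunQueueScheduler.__init__(self, runqueue, rqdata)
    #           self.prio_map.reverse()
    #
    # which would then be selected with:
    #
    #   BB_SCHEDULERS = "myschedulers.RunQueueSchedulerReverse"
    #   BB_SCHEDULER = "reverse"
    #
    # The class, module and variable values above are illustrative; prio_map is
    # assumed to be the ordering set up by the RunQueueScheduler base class
    # defined earlier in this file.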

    def setbuildable(self, task):
        self.runq_buildable[task] = 1
        self.sched.newbuilable(task)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.rqdata.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.rqdata.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.setbuildable(revdep)
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                taskname = self.rqdata.runq_task[revdep]
                logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)

    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        fnid = self.rqdata.runq_fnid[task]
        self.failed_fnids.append(fnid)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task, reason):
        self.runq_running[task] = 1
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        """

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]

            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s (%s)", task,
                             self.rqdata.get_user_idstring(task))
                self.task_skip(task, "covered")
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s (%s)", task,
                             self.rqdata.get_user_idstring(task))
                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running[task] = 1
                self.stats.taskActive()
                if not self.cooker.configuration.dry_run:
                    bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not self.rq.fakeworker:
                    try:
                        self.rq.start_fakeworker(self)
                    except OSError as exc:
                        logger.critical("Failed to spawn fakeroot worker to run %s:%s: %s" % (fn, taskname, str(exc)))
                        self.rq.state = runQueueFailed
                        return True
                self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + "</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + "</runtask>")
                self.rq.worker.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in xrange(self.stats.total):
            if self.runq_buildable[task] == 0:
                logger.error("Task %s never buildable!", task)
            if self.runq_running[task] == 0:
                logger.error("Task %s never ran!", task)
            if self.runq_complete[task] == 0:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True

    def build_taskdepdata(self, task):
        taskdepdata = {}
        # Work on a copy so that adding the task itself below does not mutate
        # the shared runq_depends entry for this task.
        next = self.rqdata.runq_depends[task].copy()
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                pn = self.rqdata.dataCache.pkg_fn[fn]
                taskname = self.rqdata.runq_task[revdep]
                deps = self.rqdata.runq_depends[revdep]
                provides = self.rqdata.dataCache.fn_provides[fn]
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
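
    # Illustrative sketch of the structure returned above (the values are made
    # up): taskdepdata maps every runqueue task id reachable from the starting
    # task to [pn, taskname, fn, deps, provides], roughly:
    #
    #   {
    #       12: ['zlib', 'do_compile', '/path/to/zlib_1.2.8.bb', set([11]), ['zlib']],
    #       11: ['zlib', 'do_configure', '/path/to/zlib_1.2.8.bb', set([10]), ['zlib']],
    #   }
    #
    # This is the object pickled into the <runtask> message sent to the worker
    # in execute() above.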


class RunQueueExecuteScenequeue(RunQueueExecute):
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene))

        sq_revdeps = []
        sq_revdeps_new = []
        sq_revdeps_squash = []
        self.sq_harddeps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            self.runq_buildable.append(0)

        # First process the chains up to the first setscene task.
        endpoints = {}
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints[task] = set()

        # Secondly process the chains between setscene tasks.
        for task in self.rqdata.runq_setscene:
            for dep in self.rqdata.runq_depends[task]:
                if dep not in endpoints:
                    endpoints[dep] = set()
                endpoints[dep].add(task)

        def process_endpoints(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)
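
        # Worked example (illustrative, with made-up task ids): suppose task 7 has
        # a setscene variant, task 8 (no setscene variant) depends on 7, and task 9
        # (setscene variant) depends on 8. After the walk above, sq_revdeps_new[7]
        # is set([9]): the intermediate task 8 has been collapsed away and 9 is
        # recorded as a direct reverse dependency of 7 in the squashed setscene
        # graph built below.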

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = []
        sq_revdeps_new2 = []
        def process_endpoints2(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps2.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new2.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints2[task] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for task in self.rqdata.runq_setscene:
            if sq_revdeps_new2[task]:
                self.unskippable.append(self.rqdata.runq_setscene.index(task))

        for task in xrange(len(self.rqdata.runq_fnid)):
            if task in self.rqdata.runq_setscene:
                deps = set()
                for dep in sq_revdeps_new[task]:
                    deps.add(self.rqdata.runq_setscene.index(dep))
                sq_revdeps_squash.append(deps)
            elif len(sq_revdeps_new[task]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for task in self.rqdata.runq_setscene:
            realid = self.rqdata.taskData.gettask_id(self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]], self.rqdata.runq_task[task] + "_setscene", False)
            idepends = self.rqdata.taskData.tasks_idepends[realid]
            for (depid, idependtask) in idepends:
                if depid not in self.rqdata.taskData.build_targets:
                    continue
                depdata = self.rqdata.taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                dep = self.rqdata.taskData.fn_index[depdata]
                taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
                if taskid is None:
                    bb.msg.fatal("RunQueue", "Task %s_setscene depends upon non-existent task %s:%s" % (self.rqdata.get_user_idstring(task), dep, idependtask))

                if not self.rqdata.runq_setscene.index(taskid) in self.sq_harddeps:
                    self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)] = set()
                self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)].add(self.rqdata.runq_setscene.index(task))

                sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[self.rqdata.runq_setscene.index(taskid)] = set()

        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        #for task in xrange(len(sq_revdeps_squash)):
        #    realtask = self.rqdata.runq_setscene[task]
        #    bb.warn("Task %s: %s_setscene is %s " % (task, self.rqdata.get_user_idstring(realtask) , sq_revdeps_squash[task]))

        self.sq_deps = []
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for task in xrange(len(self.sq_revdeps)):
            self.sq_deps.append(set())
        for task in xrange(len(self.sq_revdeps)):
            for dep in self.sq_revdeps[task]:
                self.sq_deps[dep].add(task)

        for task in xrange(len(self.sq_revdeps)):
            if len(self.sq_revdeps[task]) == 0:
                self.runq_buildable[task] = 1

        self.outrightfail = []
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for task in xrange(len(self.sq_revdeps)):
                realtask = self.rqdata.runq_setscene[task]
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                taskname = self.rqdata.runq_task[realtask]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(task)
                    self.task_skip(task)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                    continue

                if self.rq.check_stamp_task(realtask, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                if self.rq.check_stamp_task(realtask, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
                sq_hash.append(self.rqdata.runq_hash[realtask])
                sq_taskname.append(taskname)
                sq_task.append(task)
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
            valid = bb.utils.better_eval(call, locs)

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for task in xrange(len(self.sq_revdeps)):
                if task not in valid_new and task not in noexec:
                    realtask = self.rqdata.runq_setscene[task]
                    logger.debug(2, 'No package found, so skipping setscene task %s',
                                 self.rqdata.get_user_idstring(realtask))
                    self.outrightfail.append(task)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail = False):
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                realtask = self.rqdata.runq_setscene[task]
                realdep = self.rqdata.runq_setscene[dep]
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (self.rqdata.get_user_idstring(realtask), self.rqdata.get_user_idstring(realdep)))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable[dep] = 1

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        index = self.rqdata.runq_setscene[task]
        logger.debug(1, 'Found task %s which could be accelerated',
                     self.rqdata.get_user_idstring(index))

        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_failoutright(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        index = self.rqdata.runq_setscene[task]
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in xrange(self.stats.total):
                if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        realtask = self.rqdata.runq_setscene[nexttask]
                        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                        foundtarget = False
                        for target in self.rqdata.target_pairs:
                            if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                                foundtarget = True
                                break
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            realtask = self.rqdata.runq_setscene[task]
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]

            taskname = self.rqdata.runq_task[realtask] + "_setscene"
            if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                for target in self.rqdata.target_pairs:
                    if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                        self.task_failoutright(task)
                        return True

            if self.rq.check_stamp_task(realtask, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + "</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + "</runtask>")
                self.rq.worker.stdin.flush()

            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for task in xrange(self.stats.total):
        #    if self.runq_running[task] != 1:
        #        buildable = self.runq_buildable[task]
        #        revdeps = self.sq_revdeps[task]
        #        bb.warn("Found we didn't run %s %s %s %s" % (task, buildable, str(revdeps), self.rqdata.get_user_idstring(self.rqdata.runq_setscene[task])))

        # Convert scenequeue_covered task numbers into full taskgraph ids
        oldcovered = self.scenequeue_covered
        self.rq.scenequeue_covered = set()
        for task in oldcovered:
            self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])

        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        task = self.rq.rqdata.runq_setscene.index(task)

        RunQueueExecute.runqueue_process_waitpid(self, task, status)


class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        self.args = x


class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)


class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = rq.rqdata.get_user_idstring(task)
        self.taskname = rq.rqdata.get_task_name(task)
        self.taskfile = rq.rqdata.get_task_file(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)


class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        realtask = rq.rqdata.runq_setscene[task]
        self.taskstring = rq.rqdata.get_user_idstring(realtask, "_setscene")
        self.taskname = rq.rqdata.get_task_name(realtask) + "_setscene"
        self.taskfile = rq.rqdata.get_task_file(realtask)
        self.taskhash = rq.rqdata.get_task_hash(realtask)


class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec


class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec


class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode


class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode


class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        self.stats = stats.copy()
        bb.event.Event.__init__(self)


class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    """


class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed
    """


class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason


class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = ""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        self.rqexec = rqexec

    def read(self):
        for w in [self.rq.worker, self.rq.fakeworker]:
            if not w:
                continue
            w.poll()
            if w.returncode is not None and not self.rq.teardown:
                name = None
                if self.rq.worker and w.pid == self.rq.worker.pid:
                    name = "Worker"
                elif self.rq.fakeworker and w.pid == self.rq.fakeworker.pid:
                    name = "Fakeroot"
                bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, w.pid, str(w.returncode)))
                self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + self.input.read(102400)
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find("</event>")
            while index != -1 and self.queue.startswith("<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find("</event>")
            index = self.queue.find("</exitcode>")
            while index != -1 and self.queue.startswith("<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find("</exitcode>")
        return (end > start)
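
    # Illustrative sketch (not a verbatim trace): the worker frames each message
    # it writes to this pipe as a pickled payload wrapped in simple tags, so the
    # parsing above only needs to look for matching start/end markers, roughly:
    #
    #   "<event>" + pickle.dumps(some_event) + "</event>"
    #   "<exitcode>" + pickle.dumps((task, status)) + "</exitcode>"
    #
    # The slice offsets used above (7, 8, 10 and 11) are simply the lengths of
    # these opening and closing tags.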

    def close(self):
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()