
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import copy
import os
import sys
import signal
import stat
import fcntl
import errno
import logging
import bb
from bb import msg, data, event
from bb import monitordisk
import subprocess

try:
    import cPickle as pickle
except ImportError:
    import pickle

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self, number = 1):
        self.active = self.active - number
        self.completed = self.completed + number

    def taskSkipped(self, number = 1):
        self.active = self.active + number
        self.skipped = self.skipped + number

    def taskActive(self):
        self.active = self.active + 1

# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
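
# In broad terms a successful build advances through these states in
# ascending order (runQueuePrepare -> runQueueSceneInit -> runQueueSceneRun ->
# runQueueRunInit -> runQueueRunning -> runQueueComplete), with runQueueFailed
# and runQueueCleanUp entered on errors or early termination; the exact
# transitions are implemented in RunQueue._execute_runqueue() below.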

class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        numTasks = len(self.rqdata.runq_fnid)

        self.prio_map = []
        self.prio_map.extend(range(numTasks))

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        for tasknum in xrange(len(self.rqdata.runq_fnid)):
            taskid = self.prio_map[tasknum]
            if self.rq.runq_running[taskid] == 1:
                continue
            if self.rq.runq_buildable[taskid] == 1:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[taskid]]
                taskname = self.rqdata.runq_task[taskid]
                stamp = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
                if stamp in self.rq.build_stamps.values():
                    continue
                return taskid

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()
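
    # next() above throttles dispatch: a new task is only offered while the
    # number of active tasks is below rq.number_tasks, which RunQueueExecute
    # derives from BB_NUMBER_THREADS further down in this file.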

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight;
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        self.rq = runqueue
        self.rqdata = rqdata

        sortweight = sorted(copy.deepcopy(self.rqdata.runq_weight))
        copyweight = copy.deepcopy(self.rqdata.runq_weight)
        self.prio_map = []

        for weight in sortweight:
            idx = copyweight.index(weight)
            self.prio_map.append(idx)
            copyweight[idx] = -1

        self.prio_map.reverse()
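
    # Illustrative example of the mapping built above: given runq_weight
    # [3, 1, 7], sortweight is [1, 3, 7], the loop produces prio_map [1, 0, 2]
    # (task indices in ascending weight order) and the final reverse() yields
    # [2, 0, 1], so the heaviest task (index 2, weight 7) is offered to
    # next_buildable_task() first.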

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so that once a
    given .bb file starts to build, it is completed as quickly as possible.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)

        # FIXME - whilst this groups all fnids together it does not reorder the
        # fnid groups optimally.
        basemap = copy.deepcopy(self.prio_map)
        self.prio_map = []
        while (len(basemap) > 0):
            entry = basemap.pop(0)
            self.prio_map.append(entry)
            fnid = self.rqdata.runq_fnid[entry]
            todel = []
            for entry in basemap:
                entry_fnid = self.rqdata.runq_fnid[entry]
                if entry_fnid == fnid:
                    todel.append(basemap.index(entry))
                    self.prio_map.append(entry)
            todel.reverse()
            for idx in todel:
                del basemap[idx]
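
    # Illustrative example of the regrouping above: with a weight-ordered
    # prio_map of [4, 2, 0, 3, 1] where tasks 4, 0 and 1 come from one .bb file
    # and tasks 2 and 3 from another, the loop emits [4, 0, 1, 2, 3], keeping
    # each file's tasks together in the order the files first appear.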

class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCache, taskData, targets):
        self.cooker = cooker
        self.dataCache = dataCache
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST", True) or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()

        self.reset()

    def reset(self):
        self.runq_fnid = []
        self.runq_task = []
        self.runq_depends = []
        self.runq_revdeps = []
        self.runq_hash = []

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in self.runq_depends[ids]:
            nam = os.path.basename(self.get_user_idstring(id))
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_task_name(self, task):
        return self.runq_task[task]

    def get_task_file(self, task):
        return self.taskData.fn_index[self.runq_fnid[task]]

    def get_task_hash(self, task):
        return self.runq_hash[task]

    def get_user_idstring(self, task, task_name_suffix = ""):
        fn = self.taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task] + task_name_suffix
        return "%s, %s" % (fn, taskname)

    def get_task_id(self, fnid, taskname):
        for listid in xrange(len(self.runq_fnid)):
            if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname:
                return listid
        return None

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in xrange(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in xrange(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (%s) (dependent Tasks %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends_names(dep)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs

    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list, finding tasks that are not
        possible to execute due to circular dependencies.
        """
        numTasks = len(self.runq_fnid)
        weight = []
        deps_left = []
        task_done = []

        for listid in xrange(numTasks):
            task_done.append(False)
            weight.append(0)
            deps_left.append(len(self.runq_revdeps[listid]))

        for listid in endpoints:
            weight[listid] = 1
            task_done[listid] = True

        while True:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    weight[revdep] = weight[revdep] + weight[listid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for task in xrange(numTasks):
            if task_done[task] is False or deps_left[task] != 0:
                problem_tasks.append(task)
                logger.debug(2, "Task %s (%s) is not buildable", task, self.get_user_idstring(task))
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[task], deps_left[task])

        if problem_tasks:
            message = "Unbuildable tasks were found.\n"
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
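
    # Illustrative example of the weighting above: for a diamond where task 0
    # has no dependencies, tasks 1 and 2 each depend on task 0, and task 3
    # depends on tasks 1 and 2, the only endpoint is task 3 (weight 1). The
    # weights then propagate backwards, giving tasks 1 and 2 a weight of 1
    # each and task 0 a weight of 2, so the task needed by the most others is
    # run first by the "speed" scheduler.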

    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        runq_build = []
        recursivetasks = {}
        recursiveitasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        if len(taskData.tasks_name) == 0:
            # Nothing to do
            return 0

        logger.info("Preparing runqueue")

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        def add_build_dependencies(depids, tasknames, depends):
            for depid in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depid not in taskData.build_targets:
                    continue
                depdata = taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depdata, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        def add_runtime_dependencies(depids, tasknames, depends):
            for depid in depids:
                if depid not in taskData.run_targets:
                    continue
                depdata = taskData.run_targets[depid][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depdata, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        def add_resolved_dependencies(depids, tasknames, depends):
            for depid in depids:
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depid, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        for task in xrange(len(taskData.tasks_name)):
            depends = set()
            fnid = taskData.tasks_fnid[task]
            fn = taskData.fn_index[fnid]
            task_deps = self.dataCache.task_deps[fn]

            logger.debug(2, "Processing %s:%s", fn, taskData.tasks_name[task])

            if fnid not in taskData.failed_fnids:

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                depends = set(taskData.tasks_tdepends[task])

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                    add_build_dependencies(taskData.depids[fnid], tasknames, depends)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskData.tasks_name[task]].split()
                    add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData.tasks_idepends[task]
                for (depid, idependtask) in idepends:
                    if depid in taskData.build_targets and not depid in taskData.failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
                            if taskid is None:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
                            depends.add(taskid)
                irdepends = taskData.tasks_irdepends[task]
                for (depid, idependtask) in irdepends:
                    if depid in taskData.run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        depdata = taskData.run_targets[depid][0]
                        if depdata is not None:
                            taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
                            if taskid is None:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
                            depends.add(taskid)

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskData.tasks_name[task]].split()
                    recursivetasks[task] = tasknames
                    add_build_dependencies(taskData.depids[fnid], tasknames, depends)
                    add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)
                    if taskData.tasks_name[task] in tasknames:
                        recursivetasksselfref.add(task)

                if 'recideptask' in task_deps and taskData.tasks_name[task] in task_deps['recideptask']:
                    recursiveitasks[task] = []
                    for t in task_deps['recideptask'][taskData.tasks_name[task]].split():
                        newdep = taskData.gettask_id_fromfnid(fnid, t)
                        recursiveitasks[task].append(newdep)

            self.runq_fnid.append(taskData.tasks_fnid[task])
            self.runq_task.append(taskData.tasks_name[task])
            self.runq_depends.append(depends)
            self.runq_revdeps.append(set())
            self.runq_hash.append("")

            runq_build.append(0)

        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of self.runq_depends to be complete before this is processed
        extradeps = {}
        for task in recursivetasks:
            extradeps[task] = set(self.runq_depends[task])
            tasknames = recursivetasks[task]
            seendeps = set()
            seenfnid = []

            def generate_recdeps(t):
                newdeps = set()
                add_resolved_dependencies([taskData.tasks_fnid[t]], tasknames, newdeps)
                extradeps[task].update(newdeps)
                seendeps.add(t)
                newdeps.add(t)
                for i in newdeps:
                    for n in self.runq_depends[i]:
                        if n not in seendeps:
                            generate_recdeps(n)
            generate_recdeps(task)

            if task in recursiveitasks:
                for dep in recursiveitasks[task]:
                    generate_recdeps(dep)

        # Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for task in recursivetasks:
            extradeps[task].difference_update(recursivetasksselfref)

        for task in xrange(len(taskData.tasks_name)):
            # Add in extra dependencies
            if task in extradeps:
                self.runq_depends[task] = extradeps[task]
            # Remove all self references
            if task in self.runq_depends[task]:
                logger.debug(2, "Task %s (%s %s) contains self reference! %s", task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], self.runq_depends[task])
                self.runq_depends[task].remove(task)

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(listid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if runq_build[listid] == 1:
                return

            runq_build[listid] = 1

            depends = self.runq_depends[listid]
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_pairs = []
        for target in self.targets:
            targetid = taskData.getbuild_id(target[0])

            if targetid not in taskData.build_targets:
                continue

            if targetid in taskData.failed_deps:
                continue

            fnid = taskData.build_targets[targetid][0]
            fn = taskData.fn_index[fnid]
            self.target_pairs.append((fn, target[1]))

            if fnid in taskData.failed_fnids:
                continue

            if target[1] not in taskData.tasks_lookup[fnid]:
                import difflib
                close_matches = difflib.get_close_matches(target[1], taskData.tasks_lookup[fnid], cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n %s" % "\n ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s%s" % (target[1], target[0], extra))

            listid = taskData.tasks_lookup[fnid][target[1]]

            mark_active(listid, 1)

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        maps = []
        delcount = 0
        for listid in xrange(len(self.runq_fnid)):
            if runq_build[listid-delcount] == 1:
                maps.append(listid-delcount)
            else:
                del self.runq_fnid[listid-delcount]
                del self.runq_task[listid-delcount]
                del self.runq_depends[listid-delcount]
                del runq_build[listid-delcount]
                del self.runq_revdeps[listid-delcount]
                del self.runq_hash[listid-delcount]
                delcount = delcount + 1
                maps.append(-1)

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runq_fnid) == 0:
            if not taskData.abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runq_fnid))

        # Remap the dependencies to account for the deleted tasks
        # Check we didn't delete a task we depend on
        for listid in xrange(len(self.runq_fnid)):
            newdeps = []
            origdeps = self.runq_depends[listid]
            for origdep in origdeps:
                if maps[origdep] == -1:
                    bb.msg.fatal("RunQueue", "Invalid mapping - Should never happen!")
                newdeps.append(maps[origdep])
            self.runq_depends[listid] = set(newdeps)

        logger.verbose("Assign Weightings")

        # Generate a list of reverse dependencies to ease future calculations
        for listid in xrange(len(self.runq_fnid)):
            for dep in self.runq_depends[listid]:
                self.runq_revdeps[dep].add(listid)

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for listid in xrange(len(self.runq_fnid)):
            revdeps = self.runq_revdeps[listid]
            if len(revdeps) == 0:
                endpoints.append(listid)
            for dep in revdeps:
                if dep in self.runq_depends[listid]:
                    #self.dump_data(taskData)
                    bb.msg.fatal("RunQueue", "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))

        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        # Calculate task weights
        # Check for higher-length circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)

        # Sanity Check - Check for multiple tasks building the same provider
        prov_list = {}
        seen_fn = []
        for task in xrange(len(self.runq_fnid)):
            fn = taskData.fn_index[self.runq_fnid[task]]
            if fn in seen_fn:
                continue
            seen_fn.append(fn)
            for prov in self.dataCache.fn_provides[fn]:
                if prov not in prov_list:
                    prov_list[prov] = [fn]
                elif fn not in prov_list[prov]:
                    prov_list[prov].append(fn)
        for prov in prov_list:
            if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCache.pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s (%s)." % (prov, " ".join(prov_list[prov]))
                if self.warn_multi_bb:
                    logger.warn(msg)
                else:
                    msg += "\n This usually means one provides something the other doesn't and should."
                    logger.error(msg)

        # Create a whitelist usable by the stamp checks
        stampfnwhitelist = []
        for entry in self.stampwhitelist.split():
            entryid = self.taskData.getbuild_id(entry)
            if entryid not in self.taskData.build_targets:
                continue
            fnid = self.taskData.build_targets[entryid][0]
            fn = self.taskData.fn_index[fnid]
            stampfnwhitelist.append(fn)
        self.stampfnwhitelist = stampfnwhitelist

        # Iterate over the task list looking for tasks with a 'setscene' function
        self.runq_setscene = []
        if not self.cooker.configuration.nosetscene:
            for task in range(len(self.runq_fnid)):
                setscene = taskData.gettask_id(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task] + "_setscene", False)
                if not setscene:
                    continue
                self.runq_setscene.append(task)

        def invalidate_task(fn, taskname, error_nostamp):
            taskdep = self.dataCache.task_deps[fn]
            fnid = self.taskData.getfn_id(fn)
            if taskname not in taskData.tasks_lookup[fnid]:
                logger.warn("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn)

        # Invalidate task if force mode active
        if self.cooker.configuration.force:
            for (fn, target) in self.target_pairs:
                invalidate_task(fn, target, False)

        # Invalidate task if invalidate mode active
        if self.cooker.configuration.invalidate_stamp:
            for (fn, target) in self.target_pairs:
                for st in self.cooker.configuration.invalidate_stamp.split(','):
                    invalidate_task(fn, "do_%s" % st, True)

        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(range(len(self.runq_fnid)))
        while len(todeal) > 0:
            for task in todeal.copy():
                if len(self.runq_depends[task] - dealtwith) == 0:
                    dealtwith.add(task)
                    todeal.remove(task)
                    procdep = []
                    for dep in self.runq_depends[task]:
                        procdep.append(self.taskData.fn_index[self.runq_fnid[dep]] + "." + self.runq_task[dep])
                    self.runq_hash[task] = bb.parse.siggen.get_taskhash(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task], procdep, self.dataCache)

        self.hashes = {}
        self.hash_deps = {}
        for task in xrange(len(self.runq_fnid)):
            identifier = '%s.%s' % (self.taskData.fn_index[self.runq_fnid[task]],
                                    self.runq_task[task])
            self.hashes[identifier] = self.runq_hash[task]
            deps = []
            for dep in self.runq_depends[task]:
                depidentifier = '%s.%s' % (self.taskData.fn_index[self.runq_fnid[dep]],
                                           self.runq_task[dep])
                deps.append(depidentifier)
            self.hash_deps[identifier] = deps

        return len(self.runq_fnid)

    def dump_data(self, taskQueue):
        """
        Dump some debug information on the internal data structures
        """
        logger.debug(3, "run_tasks:")
        for task in xrange(len(self.rqdata.runq_task)):
            logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
                         taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                         self.rqdata.runq_task[task],
                         self.rqdata.runq_weight[task],
                         self.rqdata.runq_depends[task],
                         self.rqdata.runq_revdeps[task])

        logger.debug(3, "sorted_tasks:")
        for task1 in xrange(len(self.rqdata.runq_task)):
            if task1 in self.prio_map:
                task = self.prio_map[task1]
                logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
                             taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                             self.rqdata.runq_task[task],
                             self.rqdata.runq_weight[task],
                             self.rqdata.runq_depends[task],
                             self.rqdata.runq_revdeps[task])

class RunQueue:
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):

        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)

        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION", True) or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID", True) or None

        self.state = runQueuePrepare

        # For disk space monitor
        self.dm = monitordisk.diskMonitor(cfgData)

        self.rqexe = None
        self.worker = None
        self.workerpipe = None
        self.fakeworker = None
        self.fakeworkerpipe = None

    def _start_worker(self, fakeroot = False, rqexec = None):
        logger.debug(1, "Starting bitbake-worker")
        if fakeroot:
            fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
            fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
            env = os.environ.copy()
            for key, value in (var.split('=') for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen([fakerootcmd, "bitbake-worker", "decafbad"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
        else:
            worker = subprocess.Popen(["bitbake-worker", "decafbad"], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, rqexec)

        workerdata = {
            "taskdeps" : self.rqdata.dataCache.task_deps,
            "fakerootenv" : self.rqdata.dataCache.fakerootenv,
            "fakerootdirs" : self.rqdata.dataCache.fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCache.fakerootnoenv,
            "hashes" : self.rqdata.hashes,
            "hash_deps" : self.rqdata.hash_deps,
            "sigchecksums" : bb.parse.siggen.file_checksum_values,
            "runq_hash" : self.rqdata.runq_hash,
            "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
            "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
            "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
            "prhost" : self.cooker.prhost,
            "buildname" : self.cfgData.getVar("BUILDNAME", True),
            "date" : self.cfgData.getVar("DATE", True),
            "time" : self.cfgData.getVar("TIME", True),
        }

        worker.stdin.write("<cookerconfig>" + pickle.dumps(self.cooker.configuration) + "</cookerconfig>")
        worker.stdin.write("<workerdata>" + pickle.dumps(workerdata) + "</workerdata>")
        worker.stdin.flush()
        return worker, workerpipe
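
    # The writes above are the whole startup handshake with bitbake-worker:
    # pickled blobs framed by simple XML-style tags ("<cookerconfig>",
    # "<workerdata>") sent over the worker's stdin. Later commands such as
    # "<runtask>", "<finishnow>" and "<quit>" use the same framing, and
    # replies come back over the worker's stdout via runQueuePipe.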

    def _teardown_worker(self, worker, workerpipe):
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        worker.stdin.write("<quit></quit>")
        worker.stdin.flush()
        while worker.returncode is None:
            workerpipe.read()
            worker.poll()
        while workerpipe.read():
            continue
        workerpipe.close()

    def start_worker(self):
        if self.worker:
            self.teardown_workers()
        self.worker, self.workerpipe = self._start_worker()

    def start_fakeworker(self, rqexec):
        if not self.fakeworker:
            self.fakeworker, self.fakeworkerpipe = self._start_worker(True, rqexec)

    def teardown_workers(self):
        self._teardown_worker(self.worker, self.workerpipe)
        self.worker = None
        self.workerpipe = None
        self._teardown_worker(self.fakeworker, self.fakeworkerpipe)
        self.fakeworker = None
        self.fakeworkerpipe = None

    def read_workers(self):
        self.workerpipe.read()
        if self.fakeworkerpipe:
            self.fakeworkerpipe.read()

    def active_fds(self):
        fds = []
        if self.workerpipe:
            fds.append(self.workerpipe.input)
        if self.fakeworkerpipe:
            fds.append(self.fakeworkerpipe.input)
        return fds

    def check_stamp_task(self, task, taskname = None, recurse = False, cache = None):
        def get_timestamp(f):
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist

        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        if taskname is None:
            taskname = self.rqdata.runq_task[task]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runq_depends[task]:
            if iscurrent:
                fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
                taskname2 = self.rqdata.runq_task[dep]
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                    if t1 < t2:
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                    if recurse and iscurrent:
                        if dep in cache:
                            iscurrent = cache[dep]
                            if not iscurrent:
                                logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                        else:
                            iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                            cache[dep] = iscurrent
        if recurse:
            cache[task] = iscurrent
        return iscurrent
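
    # The 'cache' dictionary used above memoises per-dependency results across
    # the recursive calls, so shared dependency subtrees only have their stamp
    # files examined once per top-level check_stamp_task(recurse=True) call.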

    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)
        """

        retval = True

        if self.state is runQueuePrepare:
            self.rqexe = RunQueueExecuteDummy(self)
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit

                # we are ready to run, see if any UI client needs the dependency info
                if bb.cooker.CookerFeatures.SEND_DEPENDS_TREE in self.cooker.featureset:
                    depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
                    bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

        if self.state is runQueueSceneInit:
            if self.cooker.configuration.dump_signatures:
                self.dump_signatures()
            else:
                self.start_worker()
                self.rqexe = RunQueueExecuteScenequeue(self)

        if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp]:
            self.dm.check(self)

        if self.state is runQueueSceneRun:
            retval = self.rqexe.execute()

        if self.state is runQueueRunInit:
            logger.info("Executing RunQueue Tasks")
            self.rqexe = RunQueueExecuteTasks(self)
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            self.rqexe.finish()

        if self.state is runQueueComplete or self.state is runQueueFailed:
            self.teardown_workers()
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            if not self.rqdata.taskData.tryaltconfigs:
                raise bb.runqueue.TaskFailure(self.rqexe.failed_fnids)
            for fnid in self.rqexe.failed_fnids:
                self.rqdata.taskData.fail_fnid(fnid)
            self.rqdata.reset()

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval

    def execute_runqueue(self):
        # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
        try:
            return self._execute_runqueue()
        except bb.runqueue.TaskFailure:
            raise
        except SystemExit:
            raise
        except:
            logger.error("An uncaught exception occurred in runqueue, please see the failure below:")
            try:
                self.teardown_workers()
            except:
                pass
            self.state = runQueueComplete
            raise

    def finish_runqueue(self, now = False):
        if not self.rqexe:
            return

        if now:
            self.rqexe.finish_now()
        else:
            self.rqexe.finish()

    def dump_signatures(self):
        self.state = runQueueComplete
        done = set()
        bb.note("Reparsing files to collect dependency data")
        for task in range(len(self.rqdata.runq_fnid)):
            if self.rqdata.runq_fnid[task] not in done:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data)
                done.add(self.rqdata.runq_fnid[task])

        bb.parse.siggen.dump_sigs(self.rqdata.dataCache)

        return

class RunQueueExecute:

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"

        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []
        self.build_stamps = {}
        self.failed_fnids = []

        self.stampcache = {}

        rq.workerpipe.setrunqueueexec(self)
        if rq.fakeworkerpipe:
            rq.fakeworkerpipe.setrunqueueexec(self)

    def runqueue_process_waitpid(self, task, status):

        # self.build_stamps[task] may not exist when using a shared work directory.
        if task in self.build_stamps:
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):

        self.rq.worker.stdin.write("<finishnow></finishnow>")
        self.rq.worker.stdin.flush()
        if self.rq.fakeworker:
            self.rq.fakeworker.stdin.write("<finishnow></finishnow>")
            self.rq.fakeworker.stdin.flush()

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def check_dependencies(self, task, taskdeps, setscene = False):
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            if setscene:
                depid = self.rqdata.runq_setscene[dep]
            else:
                depid = dep
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[depid]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[depid]
            taskdata[dep] = [pn, taskname, fn]

        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        return valid
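
    # Hedged sketch (not from the original source): check_dependencies() above
    # expects the BB_SETSCENE_DEPVALID hook to be callable with the argument
    # list built in 'call', i.e. something along the lines of:
    #
    #     def my_setscene_depvalid(task, taskdata, notneeded, d):
    #         # taskdata maps each dependency to [pn, taskname, fn]; return a
    #         # true value if the dependency still needs to be run.
    #         return True
    #
    # The policy such a hook implements is metadata-specific and the name
    # my_setscene_depvalid is purely illustrative.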

class RunQueueExecuteDummy(RunQueueExecute):
    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        self.rq.state = runQueueComplete
        return

class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runq_fnid))

        self.stampcache = {}

        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.rqdata.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)
            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                self.rq.scenequeue_covered.add(task)

        found = True
        while found:
            found = False
            for task in xrange(self.stats.total):
                if task in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))

                if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                    found = True
                    self.rq.scenequeue_covered.add(task)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            for task in xrange(len(self.rqdata.runq_task)):
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                taskname = self.rqdata.runq_task[task]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(task, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                if self.rq.check_stamp_task(task, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                invalidtasks.append(task)

            call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d, invalidtasks=invalidtasks)"
            call2 = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            # Backwards compatibility with older versions without invalidtasks
            try:
                covered_remove = bb.utils.better_eval(call, locs)
            except TypeError:
                covered_remove = bb.utils.better_eval(call2, locs)

        def removecoveredtask(task):
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task] + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
            self.rq.scenequeue_covered.remove(task)

        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runq_depends[task]:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)

        event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)

        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

    def get_schedulers(self):
        schedulers = set(obj for obj in globals().values()
                         if type(obj) is type and
                            issubclass(obj, RunQueueScheduler))

        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue

                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))
        return schedulers
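
    # Hedged usage sketch (not from the original source): a custom scheduler is
    # made available by naming its module and class in BB_SCHEDULERS and then
    # selecting it by its 'name' attribute via BB_SCHEDULER, for example:
    #
    #     BB_SCHEDULERS = "myschedulers.RunQueueSchedulerSites"
    #     BB_SCHEDULER = "sites"
    #
    # where myschedulers.RunQueueSchedulerSites subclasses RunQueueScheduler
    # and sets name = "sites". The module and class names here are purely
    # illustrative.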

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.rqdata.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.rqdata.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.runq_buildable[revdep] = 1
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                taskname = self.rqdata.runq_task[revdep]
                logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)

    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        fnid = self.rqdata.runq_fnid[task]
        self.failed_fnids.append(fnid)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task, reason):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        """

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]

            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s (%s)", task,
                                self.rqdata.get_user_idstring(task))
                self.task_skip(task, "covered")
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s (%s)", task,
                                self.rqdata.get_user_idstring(task))
                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running[task] = 1
                self.stats.taskActive()
                bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.worker.stdin.flush()
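            # Note (added commentary, not in the original): each request is
            # framed as "<runtask>" + a pickled tuple + "</runtask>" on the
            # worker's stdin; the tuple here carries the recipe filename, the
            # runqueue task index, the task name, a boolean flag (False for
            # normal execution) and the bbappend list for the recipe. Replies
            # come back through runQueuePipe as <event>/<exitcode> framed
            # pickles.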
            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in xrange(self.stats.total):
            if self.runq_buildable[task] == 0:
                logger.error("Task %s never buildable!", task)
            if self.runq_running[task] == 0:
                logger.error("Task %s never ran!", task)
            if self.runq_complete[task] == 0:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True
class RunQueueExecuteScenequeue(RunQueueExecute):
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene))

        sq_revdeps = []
        sq_revdeps_new = []
        sq_revdeps_squash = []
        self.sq_harddeps = []

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.
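        #
        # Illustrative example (added commentary, not in the original): if the
        # runqueue contains a chain A:do_populate_sysroot -> B:do_configure ->
        # B:do_compile -> B:do_install -> B:do_populate_sysroot and only the
        # do_populate_sysroot tasks have setscene variants, the collapsed graph
        # records B's setscene task as a direct reverse dependency of A's,
        # dropping the intermediate non-setscene tasks from the graph.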
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            self.runq_buildable.append(0)

        # First process the chains up to the first setscene task.
        endpoints = {}
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints[task] = set()

        # Secondly process the chains between setscene tasks.
        for task in self.rqdata.runq_setscene:
            for dep in self.rqdata.runq_depends[task]:
                if dep not in endpoints:
                    endpoints[dep] = set()
                endpoints[dep].add(task)

        def process_endpoints(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)
        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = []
        sq_revdeps_new2 = []
        def process_endpoints2(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps2.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new2.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints2[task] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for task in self.rqdata.runq_setscene:
            if sq_revdeps_new2[task]:
                self.unskippable.append(self.rqdata.runq_setscene.index(task))

        for task in xrange(len(self.rqdata.runq_fnid)):
            if task in self.rqdata.runq_setscene:
                deps = set()
                for dep in sq_revdeps_new[task]:
                    deps.add(self.rqdata.runq_setscene.index(dep))
                sq_revdeps_squash.append(deps)
            elif len(sq_revdeps_new[task]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for task in self.rqdata.runq_setscene:
            realid = self.rqdata.taskData.gettask_id(self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]], self.rqdata.runq_task[task] + "_setscene", False)
            idepends = self.rqdata.taskData.tasks_idepends[realid]
            for (depid, idependtask) in idepends:
                if depid not in self.rqdata.taskData.build_targets:
                    continue

                depdata = self.rqdata.taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                dep = self.rqdata.taskData.fn_index[depdata]
                taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
                if taskid is None:
                    bb.msg.fatal("RunQueue", "Task %s:%s depends upon non-existent task %s:%s" % (self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realid]], self.rqdata.taskData.tasks_name[realid], dep, idependtask))

                self.sq_harddeps.append(self.rqdata.runq_setscene.index(taskid))
                sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[self.rqdata.runq_setscene.index(taskid)] = set()

        #for task in xrange(len(sq_revdeps_squash)):
        #    print "Task %s: %s.%s is %s " % (task, self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[self.rqdata.runq_setscene[task]]], self.rqdata.runq_task[self.rqdata.runq_setscene[task]] + "_setscene", sq_revdeps_squash[task])

        self.sq_deps = []
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for task in xrange(len(self.sq_revdeps)):
            self.sq_deps.append(set())
        for task in xrange(len(self.sq_revdeps)):
            for dep in self.sq_revdeps[task]:
                self.sq_deps[dep].add(task)

        for task in xrange(len(self.sq_revdeps)):
            if len(self.sq_revdeps[task]) == 0:
                self.runq_buildable[task] = 1
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for task in xrange(len(self.sq_revdeps)):
                realtask = self.rqdata.runq_setscene[task]
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                taskname = self.rqdata.runq_task[realtask]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(task)
                    self.task_skip(task)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                    continue

                if self.rq.check_stamp_task(realtask, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                if self.rq.check_stamp_task(realtask, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
                sq_hash.append(self.rqdata.runq_hash[realtask])
                sq_taskname.append(taskname)
                sq_task.append(task)
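
            # Note (added commentary, not in the original): self.rq.hashvalidate
            # names a metadata-supplied Python function which is evaluated below
            # with the collected filenames, task names, hashes and hash
            # filenames; it is expected to return indices into those lists for
            # the tasks whose setscene artefacts appear to be available, and
            # everything else is failed outright so the real task runs instead.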
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for task in xrange(len(self.sq_revdeps)):
                if task not in valid_new and task not in noexec:
                    realtask = self.rqdata.runq_setscene[task]
                    logger.debug(2, 'No package found, so skipping setscene task %s',
                                 self.rqdata.get_user_idstring(realtask))
                    self.task_failoutright(task)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun
    def scenequeue_updatecounters(self, task, fail = False):
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps:
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable[dep] = 1
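
    # Note (added commentary, not in the original): when a setscene task fails
    # and it appears in sq_harddeps (i.e. it was an explicit _setscene[depends]
    # target), the counters of its dependents are deliberately left untouched
    # above, so those dependents never become buildable as setscene tasks and
    # instead fall back to normal execution later.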
    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        index = self.rqdata.runq_setscene[task]
        logger.debug(1, 'Found task %s which could be accelerated',
                        self.rqdata.get_user_idstring(index))

        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_failoutright(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        index = self.rqdata.runq_setscene[task]
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in xrange(self.stats.total):
                if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        logger.debug(2, "Skipping setscene for task %s" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                        self.task_skip(nexttask)
                        self.scenequeue_notneeded.add(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            realtask = self.rqdata.runq_setscene[task]
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]

            taskname = self.rqdata.runq_task[realtask] + "_setscene"
            if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
                                task, self.rqdata.get_user_idstring(realtask))
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                for target in self.rqdata.target_pairs:
                    if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                        self.task_failoutright(task)
                        return True

            if self.rq.check_stamp_task(realtask, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
                                task, self.rqdata.get_user_idstring(realtask))
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.worker.stdin.flush()

            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        # Convert scenequeue_covered task numbers into full taskgraph ids
        oldcovered = self.scenequeue_covered
        self.rq.scenequeue_covered = set()
        for task in oldcovered:
            self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])

        self.rq.scenequeue_notcovered = set()
        for task in self.scenequeue_notcovered:
            self.rq.scenequeue_notcovered.add(self.rqdata.runq_setscene[task])

        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))

        self.rq.state = runQueueRunInit
        return True
    def runqueue_process_waitpid(self, task, status):
        task = self.rq.rqdata.runq_setscene.index(task)

        RunQueueExecute.runqueue_process_waitpid(self, task, status)
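
    # Note (added commentary, not in the original): the worker reports exit
    # codes against the global runqueue task index, so the override above maps
    # that index back to this scene queue's local numbering before reusing the
    # generic RunQueueExecute handling.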
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        self.args = x


class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = rq.rqdata.get_user_idstring(task)
        self.taskname = rq.rqdata.get_task_name(task)
        self.taskfile = rq.rqdata.get_task_file(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)

class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        realtask = rq.rqdata.runq_setscene[task]
        self.taskstring = rq.rqdata.get_user_idstring(realtask, "_setscene")
        self.taskname = rq.rqdata.get_task_name(realtask) + "_setscene"
        self.taskfile = rq.rqdata.get_task_file(realtask)
        self.taskhash = rq.rqdata.get_task_hash(task)
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    """

class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed
    """

class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq):
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = ""
        self.d = d
        self.rq = rq

    def setrunqueueexec(self, rq):
        self.rq = rq

    def read(self):
        start = len(self.queue)
        try:
            self.queue = self.queue + self.input.read(102400)
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find("</event>")
            while index != -1 and self.queue.startswith("<event>"):
                event = pickle.loads(self.queue[7:index])
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find("</event>")
            index = self.queue.find("</exitcode>")
            while index != -1 and self.queue.startswith("<exitcode>"):
                task, status = pickle.loads(self.queue[10:index])
                self.rq.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find("</exitcode>")
        return (end > start)

    def close(self):
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()