#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import copy
import os
import sys
import signal
import stat
import fcntl
import errno
import logging
import bb
from bb import msg, data, event
from bb import monitordisk
import subprocess

try:
    import cPickle as pickle
except ImportError:
    import pickle

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self, number = 1):
        self.active = self.active - number
        self.completed = self.completed + number

    def taskSkipped(self, number = 1):
        self.active = self.active + number
        self.skipped = self.skipped + number

    def taskActive(self):
        self.active = self.active + 1
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
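# A successful build typically walks these states in order:
#   runQueuePrepare -> runQueueSceneInit -> runQueueSceneRun ->
#   runQueueRunInit -> runQueueRunning -> runQueueCleanUp -> runQueueComplete
# with runQueueFailed entered from the execution states when tasks fail
# (see RunQueue._execute_runqueue below).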
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        numTasks = len(self.rqdata.runq_fnid)

        self.prio_map = []
        self.prio_map.extend(range(numTasks))

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        for tasknum in xrange(len(self.rqdata.runq_fnid)):
            taskid = self.prio_map[tasknum]
            if self.rq.runq_running[taskid] == 1:
                continue
            if self.rq.runq_buildable[taskid] == 1:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[taskid]]
                taskname = self.rqdata.runq_task[taskid]
                stamp = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
                if stamp in self.rq.build_stamps.values():
                    continue
                return taskid

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        self.rq = runqueue
        self.rqdata = rqdata

        sortweight = sorted(copy.deepcopy(self.rqdata.runq_weight))
        copyweight = copy.deepcopy(self.rqdata.runq_weight)
        self.prio_map = []

        for weight in sortweight:
            idx = copyweight.index(weight)
            self.prio_map.append(idx)
            copyweight[idx] = -1

        self.prio_map.reverse()
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible. This works
    well where disk space is at a premium and classes like OE's rm_work are in
    force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)

        #FIXME - whilst this groups all fnids together it does not reorder the
        #fnid groups optimally.

        basemap = copy.deepcopy(self.prio_map)
        self.prio_map = []
        while (len(basemap) > 0):
            entry = basemap.pop(0)
            self.prio_map.append(entry)
            fnid = self.rqdata.runq_fnid[entry]
            todel = []
            for entry in basemap:
                entry_fnid = self.rqdata.runq_fnid[entry]
                if entry_fnid == fnid:
                    todel.append(basemap.index(entry))
                    self.prio_map.append(entry)
            todel.reverse()
            for idx in todel:
                del basemap[idx]
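# The scheduler used for a build is chosen by name: RunQueueExecute reads
# BB_SCHEDULER (defaulting to "speed") and matches it against the 'name'
# attribute of the classes above, so e.g. BB_SCHEDULER = "completion"
# selects RunQueueSchedulerCompletion. Extra scheduler classes can be made
# available via BB_SCHEDULERS (see RunQueueExecuteTasks.get_schedulers()).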
class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCache, taskData, targets):
        self.cooker = cooker
        self.dataCache = dataCache
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST", True) or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()

        self.reset()

    def reset(self):
        self.runq_fnid = []
        self.runq_task = []
        self.runq_depends = []
        self.runq_revdeps = []
        self.runq_hash = []

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in self.runq_depends[ids]:
            nam = os.path.basename(self.get_user_idstring(id))
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_user_idstring(self, task, task_name_suffix = ""):
        fn = self.taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task] + task_name_suffix
        return "%s, %s" % (fn, taskname)

    def get_task_id(self, fnid, taskname):
        for listid in xrange(len(self.runq_fnid)):
            if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname:
                return listid
        return None
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in xrange(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in xrange(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (%s) (dependent Tasks %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends_names(dep)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.
        """
        numTasks = len(self.runq_fnid)
        weight = []
        deps_left = []
        task_done = []

        for listid in xrange(numTasks):
            task_done.append(False)
            weight.append(0)
            deps_left.append(len(self.runq_revdeps[listid]))

        for listid in endpoints:
            weight[listid] = 1
            task_done[listid] = True

        while True:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    weight[revdep] = weight[revdep] + weight[listid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for task in xrange(numTasks):
            if task_done[task] is False or deps_left[task] != 0:
                problem_tasks.append(task)
                logger.debug(2, "Task %s (%s) is not buildable", task, self.get_user_idstring(task))
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[task], deps_left[task])

        if problem_tasks:
            message = "Unbuildable tasks were found.\n"
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
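    # Illustration of the weighting above: endpoint tasks (nothing depends
    # on them) start at weight 1 and each task's weight is added to that of
    # its dependencies, so a task many other tasks transitively depend on
    # accumulates a large weight and is run early by the "speed" scheduler.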
    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """
        runq_build = []
        recursivetasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        if len(taskData.tasks_name) == 0:
            # Nothing to do
            return 0

        logger.info("Preparing runqueue")

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).
        def add_build_dependencies(depids, tasknames, depends):
            for depid in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depid not in taskData.build_targets:
                    continue
                depdata = taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depdata, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        def add_runtime_dependencies(depids, tasknames, depends):
            for depid in depids:
                if depid not in taskData.run_targets:
                    continue
                depdata = taskData.run_targets[depid][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depdata, taskname)
                    if taskid is not None:
                        depends.add(taskid)

        def add_resolved_dependencies(depids, tasknames, depends):
            for depid in depids:
                for taskname in tasknames:
                    taskid = taskData.gettask_id_fromfnid(depid, taskname)
                    if taskid is not None:
                        depends.add(taskid)
        for task in xrange(len(taskData.tasks_name)):
            depends = set()
            fnid = taskData.tasks_fnid[task]
            fn = taskData.fn_index[fnid]
            task_deps = self.dataCache.task_deps[fn]

            logger.debug(2, "Processing %s:%s", fn, taskData.tasks_name[task])

            if fnid not in taskData.failed_fnids:

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                depends = set(taskData.tasks_tdepends[task])

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                    add_build_dependencies(taskData.depids[fnid], tasknames, depends)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskData.tasks_name[task]].split()
                    add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData.tasks_idepends[task]
                for (depid, idependtask) in idepends:
                    if depid in taskData.build_targets and not depid in taskData.failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
                            if taskid is None:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
                            depends.add(taskid)
                irdepends = taskData.tasks_irdepends[task]
                for (depid, idependtask) in irdepends:
                    if depid in taskData.run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        depdata = taskData.run_targets[depid][0]
                        if depdata is not None:
                            taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
                            if taskid is None:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
                            depends.add(taskid)

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskData.tasks_name[task]].split()
                    recursivetasks[task] = tasknames
                    add_build_dependencies(taskData.depids[fnid], tasknames, depends)
                    add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)
                    if taskData.tasks_name[task] in tasknames:
                        recursivetasksselfref.add(task)

            self.runq_fnid.append(taskData.tasks_fnid[task])
            self.runq_task.append(taskData.tasks_name[task])
            self.runq_depends.append(depends)
            self.runq_revdeps.append(set())
            self.runq_hash.append("")

            runq_build.append(0)
        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of self.runq_depends to be complete before this is processed
        extradeps = {}
        for task in recursivetasks:
            extradeps[task] = set(self.runq_depends[task])
            tasknames = recursivetasks[task]
            seendeps = set()
            seenfnid = []

            def generate_recdeps(t):
                newdeps = set()
                add_resolved_dependencies([taskData.tasks_fnid[t]], tasknames, newdeps)
                extradeps[task].update(newdeps)
                seendeps.add(t)
                newdeps.add(t)
                for i in newdeps:
                    for n in self.runq_depends[i]:
                        if n not in seendeps:
                            generate_recdeps(n)
            generate_recdeps(task)

        # Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for task in recursivetasks:
            extradeps[task].difference_update(recursivetasksselfref)

        for task in xrange(len(taskData.tasks_name)):
            # Add in extra dependencies
            if task in extradeps:
                self.runq_depends[task] = extradeps[task]
            # Remove all self references
            if task in self.runq_depends[task]:
                logger.debug(2, "Task %s (%s %s) contains self reference! %s", task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], self.runq_depends[task])
                self.runq_depends[task].remove(task)
        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(listid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """
            if runq_build[listid] == 1:
                return

            runq_build[listid] = 1

            depends = self.runq_depends[listid]
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_pairs = []
        for target in self.targets:
            targetid = taskData.getbuild_id(target[0])

            if targetid not in taskData.build_targets:
                continue

            if targetid in taskData.failed_deps:
                continue

            fnid = taskData.build_targets[targetid][0]
            fn = taskData.fn_index[fnid]
            self.target_pairs.append((fn, target[1]))

            if fnid in taskData.failed_fnids:
                continue

            if target[1] not in taskData.tasks_lookup[fnid]:
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s" % (target[1], target[0]))

            listid = taskData.tasks_lookup[fnid][target[1]]

            mark_active(listid, 1)
        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        maps = []
        delcount = 0
        for listid in xrange(len(self.runq_fnid)):
            if runq_build[listid-delcount] == 1:
                maps.append(listid-delcount)
            else:
                del self.runq_fnid[listid-delcount]
                del self.runq_task[listid-delcount]
                del self.runq_depends[listid-delcount]
                del runq_build[listid-delcount]
                del self.runq_revdeps[listid-delcount]
                del self.runq_hash[listid-delcount]
                delcount = delcount + 1
                maps.append(-1)
        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runq_fnid) == 0:
            if not taskData.abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runq_fnid))

        # Remap the dependencies to account for the deleted tasks
        # Check we didn't delete a task we depend on
        for listid in xrange(len(self.runq_fnid)):
            newdeps = []
            origdeps = self.runq_depends[listid]
            for origdep in origdeps:
                if maps[origdep] == -1:
                    bb.msg.fatal("RunQueue", "Invalid mapping - Should never happen!")
                newdeps.append(maps[origdep])
            self.runq_depends[listid] = set(newdeps)

        logger.verbose("Assign Weightings")

        # Generate a list of reverse dependencies to ease future calculations
        for listid in xrange(len(self.runq_fnid)):
            for dep in self.runq_depends[listid]:
                self.runq_revdeps[dep].add(listid)

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for listid in xrange(len(self.runq_fnid)):
            revdeps = self.runq_revdeps[listid]
            if len(revdeps) == 0:
                endpoints.append(listid)
            for dep in revdeps:
                if dep in self.runq_depends[listid]:
                    #self.dump_data(taskData)
                    bb.msg.fatal("RunQueue", "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))

        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        # Calculate task weights
        # Check for higher length circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)
        # Sanity Check - Check for multiple tasks building the same provider
        prov_list = {}
        seen_fn = []
        for task in xrange(len(self.runq_fnid)):
            fn = taskData.fn_index[self.runq_fnid[task]]
            if fn in seen_fn:
                continue
            seen_fn.append(fn)
            for prov in self.dataCache.fn_provides[fn]:
                if prov not in prov_list:
                    prov_list[prov] = [fn]
                elif fn not in prov_list[prov]:
                    prov_list[prov].append(fn)
        for prov in prov_list:
            if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
                msg = "Multiple .bb files are due to be built which each provide %s (%s)." % (prov, " ".join(prov_list[prov]))
                if self.warn_multi_bb:
                    logger.warn(msg)
                else:
                    msg += "\n This usually means one provides something the other doesn't and should."
                    logger.error(msg)

        # Create a whitelist usable by the stamp checks
        stampfnwhitelist = []
        for entry in self.stampwhitelist.split():
            entryid = self.taskData.getbuild_id(entry)
            if entryid not in self.taskData.build_targets:
                continue
            fnid = self.taskData.build_targets[entryid][0]
            fn = self.taskData.fn_index[fnid]
            stampfnwhitelist.append(fn)
        self.stampfnwhitelist = stampfnwhitelist
        # Iterate over the task list looking for tasks with a 'setscene' function
        self.runq_setscene = []
        if not self.cooker.configuration.nosetscene:
            for task in range(len(self.runq_fnid)):
                setscene = taskData.gettask_id(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task] + "_setscene", False)
                if not setscene:
                    continue
                self.runq_setscene.append(task)

        def invalidate_task(fn, taskname, error_nostamp):
            taskdep = self.dataCache.task_deps[fn]
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn)

        # Invalidate task if force mode active
        if self.cooker.configuration.force:
            for (fn, target) in self.target_pairs:
                invalidate_task(fn, target, False)

        # Invalidate task if invalidate mode active
        if self.cooker.configuration.invalidate_stamp:
            for (fn, target) in self.target_pairs:
                for st in self.cooker.configuration.invalidate_stamp.split(','):
                    invalidate_task(fn, "do_%s" % st, True)

        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(range(len(self.runq_fnid)))
        while len(todeal) > 0:
            for task in todeal.copy():
                if len(self.runq_depends[task] - dealtwith) == 0:
                    dealtwith.add(task)
                    todeal.remove(task)
                    procdep = []
                    for dep in self.runq_depends[task]:
                        procdep.append(self.taskData.fn_index[self.runq_fnid[dep]] + "." + self.runq_task[dep])
                    self.runq_hash[task] = bb.parse.siggen.get_taskhash(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task], procdep, self.dataCache)

        self.hashes = {}
        self.hash_deps = {}
        for task in xrange(len(self.runq_fnid)):
            identifier = '%s.%s' % (self.taskData.fn_index[self.runq_fnid[task]],
                                    self.runq_task[task])
            self.hashes[identifier] = self.runq_hash[task]

            deps = []
            for dep in self.runq_depends[task]:
                depidentifier = '%s.%s' % (self.taskData.fn_index[self.runq_fnid[dep]],
                                           self.runq_task[dep])
                deps.append(depidentifier)
            self.hash_deps[identifier] = deps

        return len(self.runq_fnid)
    def dump_data(self, taskQueue):
        """
        Dump some debug information on the internal data structures
        """
        logger.debug(3, "run_tasks:")
        for task in xrange(len(self.rqdata.runq_task)):
            logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
                         taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                         self.rqdata.runq_task[task],
                         self.rqdata.runq_weight[task],
                         self.rqdata.runq_depends[task],
                         self.rqdata.runq_revdeps[task])

        logger.debug(3, "sorted_tasks:")
        for task1 in xrange(len(self.rqdata.runq_task)):
            if task1 in self.prio_map:
                task = self.prio_map[task1]
                logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
                             taskQueue.fn_index[self.rqdata.runq_fnid[task]],
                             self.rqdata.runq_task[task],
                             self.rqdata.runq_weight[task],
                             self.rqdata.runq_depends[task],
                             self.rqdata.runq_revdeps[task])
class RunQueue:
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):

        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)

        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION", True) or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID", True) or None

        self.state = runQueuePrepare

        # For disk space monitor
        self.dm = monitordisk.diskMonitor(cfgData)

        self.rqexe = None
        self.worker = None
        self.workerpipe = None
        self.fakeworker = None
        self.fakeworkerpipe = None

    def _start_worker(self, fakeroot = False, rqexec = None):
        logger.debug(1, "Starting bitbake-worker")
        if fakeroot:
            fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
            fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
            env = os.environ.copy()
            for key, value in (var.split('=') for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen([fakerootcmd, "bitbake-worker", "decafbad"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
        else:
            worker = subprocess.Popen(["bitbake-worker", "decafbad"], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, rqexec)

        workerdata = {
            "taskdeps" : self.rqdata.dataCache.task_deps,
            "fakerootenv" : self.rqdata.dataCache.fakerootenv,
            "fakerootdirs" : self.rqdata.dataCache.fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCache.fakerootnoenv,
            "hashes" : self.rqdata.hashes,
            "hash_deps" : self.rqdata.hash_deps,
            "sigchecksums" : bb.parse.siggen.file_checksum_values,
            "runq_hash" : self.rqdata.runq_hash,
            "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
            "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
            "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
        }

        worker.stdin.write("<cookerconfig>" + pickle.dumps(self.cooker.configuration) + "</cookerconfig>")
        worker.stdin.write("<workerdata>" + pickle.dumps(workerdata) + "</workerdata>")
        worker.stdin.flush()
        return worker, workerpipe
    def _teardown_worker(self, worker, workerpipe):
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        worker.stdin.write("<quit></quit>")
        worker.stdin.flush()
        while worker.returncode is None:
            workerpipe.read()
            worker.poll()
        while workerpipe.read():
            continue
        workerpipe.close()

    def start_worker(self):
        if self.worker:
            self.teardown_workers()
        self.worker, self.workerpipe = self._start_worker()

    def start_fakeworker(self, rqexec):
        if not self.fakeworker:
            self.fakeworker, self.fakeworkerpipe = self._start_worker(True, rqexec)

    def teardown_workers(self):
        self._teardown_worker(self.worker, self.workerpipe)
        self.worker = None
        self.workerpipe = None
        self._teardown_worker(self.fakeworker, self.fakeworkerpipe)
        self.fakeworker = None
        self.fakeworkerpipe = None

    def read_workers(self):
        self.workerpipe.read()
        if self.fakeworkerpipe:
            self.fakeworkerpipe.read()
    def check_stamp_task(self, task, taskname = None, recurse = False, cache = None):
        def get_timestamp(f):
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist

        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
        if taskname is None:
            taskname = self.rqdata.runq_task[task]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCache.task_deps[fn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runq_depends[task]:
            if iscurrent:
                fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
                taskname2 = self.rqdata.runq_task[dep]
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                    if t1 < t2:
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                    if recurse and iscurrent:
                        if dep in cache:
                            iscurrent = cache[dep]
                            if not iscurrent:
                                logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                        else:
                            iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                            cache[dep] = iscurrent
        if recurse:
            cache[task] = iscurrent
        return iscurrent
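    # Note on BB_STAMP_POLICY as used above: "perfile" compares stamps only
    # against dependencies from the same recipe file, "whitelist" extends
    # the comparison to the full dependency tree but skips files listed in
    # BB_STAMP_WHITELIST, and any other value (e.g. "full") compares across
    # the whole dependency tree.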
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)
        """

        retval = 0.5

        if self.state is runQueuePrepare:
            self.rqexe = RunQueueExecuteDummy(self)
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit

        if self.state is runQueueSceneInit:
            if self.cooker.configuration.dump_signatures:
                self.dump_signatures()
            else:
                self.start_worker()
                self.rqexe = RunQueueExecuteScenequeue(self)

        if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp]:
            self.dm.check(self)

        if self.state is runQueueSceneRun:
            retval = self.rqexe.execute()

        if self.state is runQueueRunInit:
            logger.info("Executing RunQueue Tasks")
            self.rqexe = RunQueueExecuteTasks(self)
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            self.rqexe.finish()

        if self.state is runQueueComplete or self.state is runQueueFailed:
            self.teardown_workers()
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            if not self.rqdata.taskData.tryaltconfigs:
                raise bb.runqueue.TaskFailure(self.rqexe.failed_fnids)
            for fnid in self.rqexe.failed_fnids:
                self.rqdata.taskData.fail_fnid(fnid)
            self.rqdata.reset()

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
    def execute_runqueue(self):
        # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
        try:
            return self._execute_runqueue()
        except bb.runqueue.TaskFailure:
            raise
        except SystemExit:
            raise
        except:
            logger.error("An uncaught exception occurred in runqueue, please see the failure below:")
            try:
                self.teardown_workers()
            except:
                pass
            self.state = runQueueComplete
            raise

    def finish_runqueue(self, now = False):
        if not self.rqexe:
            return

        if now:
            self.rqexe.finish_now()
        else:
            self.rqexe.finish()
    def dump_signatures(self):
        self.state = runQueueComplete
        done = set()
        bb.note("Reparsing files to collect dependency data")
        for task in range(len(self.rqdata.runq_fnid)):
            if self.rqdata.runq_fnid[task] not in done:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data)
                done.add(self.rqdata.runq_fnid[task])

        bb.parse.siggen.dump_sigs(self.rqdata.dataCache)

        return
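# The RunQueue above talks to bitbake-worker processes over stdin using
# simple framed messages: tag pairs such as <cookerconfig>, <workerdata>,
# <runtask>, <quit> and <finishnow> wrapping a pickled payload where one is
# needed. The RunQueueExecute classes below generate the <runtask> messages
# and handle the results read back through the runQueuePipe objects.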
class RunQueueExecute:

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"

        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []
        self.build_stamps = {}
        self.failed_fnids = []

        self.stampcache = {}

        rq.workerpipe.setrunqueueexec(self)
        if rq.fakeworkerpipe:
            rq.fakeworkerpipe.setrunqueueexec(self)

    def runqueue_process_waitpid(self, task, status):
        # self.build_stamps[task] may not exist when using a shared work directory.
        if task in self.build_stamps:
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        self.rq.worker.stdin.write("<finishnow></finishnow>")
        self.rq.worker.stdin.flush()
        if self.rq.fakeworker:
            self.rq.fakeworker.stdin.write("<finishnow></finishnow>")
            self.rq.fakeworker.stdin.flush()

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def check_dependencies(self, task, taskdeps, setscene = False):
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            if setscene:
                depid = self.rqdata.runq_setscene[dep]
            else:
                depid = dep
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[depid]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[depid]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        return valid
class RunQueueExecuteDummy(RunQueueExecute):
    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        self.rq.state = runQueueComplete
        return
class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runq_fnid))

        self.stampcache = {}

        # Mark initial buildable tasks
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.rqdata.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)
            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                self.rq.scenequeue_covered.add(task)

        found = True
        while found:
            found = False
            for task in xrange(self.stats.total):
                if task in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))

                if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                    found = True
                    self.rq.scenequeue_covered.add(task)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            for task in xrange(len(self.rqdata.runq_task)):
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                taskname = self.rqdata.runq_task[task]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(task, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                if self.rq.check_stamp_task(task, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                invalidtasks.append(task)

            call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d, invalidtasks=invalidtasks)"
            call2 = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            # Backwards compatibility with older versions without invalidtasks
            try:
                covered_remove = bb.utils.better_eval(call, locs)
            except TypeError:
                covered_remove = bb.utils.better_eval(call2, locs)

        for task in covered_remove:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task] + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
            self.rq.scenequeue_covered.remove(task)

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)

        event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)

        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
    def get_schedulers(self):
        schedulers = set(obj for obj in globals().values()
                             if type(obj) is type and
                                issubclass(obj, RunQueueScheduler))

        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue

                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))
        return schedulers
    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.rqdata.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.rqdata.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.runq_buildable[revdep] = 1
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                taskname = self.rqdata.runq_task[revdep]
                logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)

    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        fnid = self.rqdata.runq_fnid[task]
        self.failed_fnids.append(fnid)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        """

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]

            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s (%s)", task,
                             self.rqdata.get_user_idstring(task))
                self.task_skip(task)
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s (%s)", task,
                             self.rqdata.get_user_idstring(task))
                self.task_skip(task)
                return True

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running[task] = 1
                self.stats.taskActive()
                bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.worker.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return 0.5

        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in xrange(self.stats.total):
            if self.runq_buildable[task] == 0:
                logger.error("Task %s never buildable!", task)
            if self.runq_running[task] == 0:
                logger.error("Task %s never ran!", task)
            if self.runq_complete[task] == 0:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True
  1159. class RunQueueExecuteScenequeue(RunQueueExecute):
  1160. def __init__(self, rq):
  1161. RunQueueExecute.__init__(self, rq)
  1162. self.scenequeue_covered = set()
  1163. self.scenequeue_notcovered = set()
  1164. self.scenequeue_notneeded = set()
  1165. # If we don't have any setscene functions, skip this step
  1166. if len(self.rqdata.runq_setscene) == 0:
  1167. rq.scenequeue_covered = set()
  1168. rq.state = runQueueRunInit
  1169. return
  1170. self.stats = RunQueueStats(len(self.rqdata.runq_setscene))
  1171. sq_revdeps = []
  1172. sq_revdeps_new = []
  1173. sq_revdeps_squash = []
  1174. # We need to construct a dependency graph for the setscene functions. Intermediate
  1175. # dependencies between the setscene tasks only complicate the code. This code
  1176. # therefore aims to collapse the huge runqueue dependency tree into a smaller one
  1177. # only containing the setscene functions.
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            self.runq_buildable.append(0)

        # First process the chains up to the first setscene task.
        endpoints = {}
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints[task] = set()

        # Secondly process the chains between setscene tasks.
        for task in self.rqdata.runq_setscene:
            for dep in self.rqdata.runq_depends[task]:
                if dep not in endpoints:
                    endpoints[dep] = set()
                endpoints[dep].add(task)
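
        # process_endpoints walks backwards from the endpoints through runq_depends,
        # removing edges from sq_revdeps as it goes and accumulating, in sq_revdeps_new,
        # the set of setscene tasks reachable from each node. A dependency becomes a new
        # endpoint once its remaining reverse dependencies have been consumed (or it has
        # already accumulated setscene reverse dependencies).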
        def process_endpoints(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)
        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = []
        sq_revdeps_new2 = []
        def process_endpoints2(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for task in xrange(len(self.rqdata.runq_fnid)):
            sq_revdeps2.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new2.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints2[task] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for task in self.rqdata.runq_setscene:
            if sq_revdeps_new2[task]:
                self.unskippable.append(self.rqdata.runq_setscene.index(task))
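
        # sq_revdeps_squash is indexed by a task's position in runq_setscene and holds,
        # for each setscene task, the positions (in that same list) of the setscene tasks
        # that are its reverse dependencies.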
        for task in xrange(len(self.rqdata.runq_fnid)):
            if task in self.rqdata.runq_setscene:
                deps = set()
                for dep in sq_revdeps_new[task]:
                    deps.add(self.rqdata.runq_setscene.index(dep))
                sq_revdeps_squash.append(deps)
            elif len(sq_revdeps_new[task]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for task in self.rqdata.runq_setscene:
            realid = self.rqdata.taskData.gettask_id(self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]], self.rqdata.runq_task[task] + "_setscene", False)
            idepends = self.rqdata.taskData.tasks_idepends[realid]
            for (depid, idependtask) in idepends:
                if depid not in self.rqdata.taskData.build_targets:
                    continue
                depdata = self.rqdata.taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                dep = self.rqdata.taskData.fn_index[depdata]
                taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
                if taskid is None:
                    bb.msg.fatal("RunQueue", "Task %s:%s depends upon non-existent task %s:%s" % (self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realid]], self.rqdata.taskData.tasks_name[realid], dep, idependtask))

                sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[self.rqdata.runq_setscene.index(taskid)] = set()

        #for task in xrange(len(sq_revdeps_squash)):
        #    print "Task %s: %s.%s is %s " % (task, self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[self.rqdata.runq_setscene[task]]], self.rqdata.runq_task[self.rqdata.runq_setscene[task]] + "_setscene", sq_revdeps_squash[task])
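
        # sq_deps maps each setscene task to the setscene tasks it depends on, built by
        # inverting sq_revdeps. sq_revdeps2 is a working copy consumed by
        # scenequeue_updatecounters: a task only becomes buildable once every setscene
        # task that depends on it has been processed, so the scene queue is walked in
        # reverse dependency order.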
        self.sq_deps = []
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for task in xrange(len(self.sq_revdeps)):
            self.sq_deps.append(set())
        for task in xrange(len(self.sq_revdeps)):
            for dep in self.sq_revdeps[task]:
                self.sq_deps[dep].add(task)

        for task in xrange(len(self.sq_revdeps)):
            if len(self.sq_revdeps[task]) == 0:
                self.runq_buildable[task] = 1
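
        # If a hash validation hook is configured (self.rq.hashvalidate, normally
        # supplied via the BB_HASHCHECK_FUNCTION variable), call it with the filenames,
        # task names and hashes of the remaining setscene tasks; it returns the indices
        # of the tasks whose prebuilt artefacts are available. Anything it does not
        # return is failed outright so the real task will run instead.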
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for task in xrange(len(self.sq_revdeps)):
                realtask = self.rqdata.runq_setscene[task]
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                taskname = self.rqdata.runq_task[realtask]
                taskdep = self.rqdata.dataCache.task_deps[fn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(task)
                    self.task_skip(task)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                    continue

                if self.rq.check_stamp_task(realtask, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                if self.rq.check_stamp_task(realtask, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
                sq_hash.append(self.rqdata.runq_hash[realtask])
                sq_taskname.append(taskname)
                sq_task.append(task)
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for task in xrange(len(self.sq_revdeps)):
                if task not in valid_new and task not in noexec:
                    realtask = self.rqdata.runq_setscene[task]
                    logger.debug(2, 'No package found, so skipping setscene task %s',
                                 self.rqdata.get_user_idstring(realtask))
                    self.task_failoutright(task)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun
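
    # Called whenever a setscene task finishes (covered, failed or skipped): unblock
    # any setscene task whose reverse dependencies have now all been processed.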
    def scenequeue_updatecounters(self, task):
        for dep in self.sq_deps[task]:
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable[dep] = 1
    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        index = self.rqdata.runq_setscene[task]
        logger.debug(1, 'Found task %s which could be accelerated',
                     self.rqdata.get_user_idstring(index))

        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def task_complete(self, task):
        self.stats.taskCompleted()
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task)

    def task_failoutright(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        index = self.rqdata.runq_setscene[task]
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task)

    def task_skip(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
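
    # execute() is invoked repeatedly by its caller: returning True signals that
    # progress was made and it should be called again immediately, while returning
    # 0.5 asks the caller to retry after a short delay while tasks are still running.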
    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in xrange(self.stats.total):
                if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        logger.debug(2, "Skipping setscene for task %s" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                        self.task_skip(nexttask)
                        self.scenequeue_notneeded.add(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            realtask = self.rqdata.runq_setscene[task]
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
            taskname = self.rqdata.runq_task[realtask] + "_setscene"

            if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                for target in self.rqdata.target_pairs:
                    if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                        self.task_failoutright(task)
                        return True

            if self.rq.check_stamp_task(realtask, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn))) + "</runtask>")
                self.rq.worker.stdin.flush()

            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return 0.5

        # Convert scenequeue_covered task numbers into full taskgraph ids
        oldcovered = self.scenequeue_covered
        self.rq.scenequeue_covered = set()
        for task in oldcovered:
            self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])
        self.rq.scenequeue_notcovered = set()
        for task in self.scenequeue_notcovered:
            self.rq.scenequeue_notcovered.add(self.rqdata.runq_setscene[task])

        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))

        self.rq.state = runQueueRunInit
        return True
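
    # Workers report exit status using the global runqueue task id; translate it back
    # into this queue's setscene index before handing it to the base class.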
    def runqueue_process_waitpid(self, task, status):
        task = self.rq.rqdata.runq_setscene.index(task)
        RunQueueExecute.runqueue_process_waitpid(self, task, status)
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        self.args = x

class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)

class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = rq.rqdata.get_user_idstring(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)

class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        realtask = rq.rqdata.runq_setscene[task]
        self.taskstring = rq.rqdata.get_user_idstring(realtask, "_setscene")
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    """
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq):
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = ""
        self.d = d
        self.rq = rq

    def setrunqueueexec(self, rq):
        self.rq = rq
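
    # The worker writes pickled payloads framed as "<event>...</event>" (a bb.event to
    # re-fire on the server side) or "<exitcode>...</exitcode>" (a (task, status) pair
    # passed to runqueue_process_waitpid). read() drains the non-blocking pipe and
    # processes every complete frame currently in the buffer.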
    def read(self):
        start = len(self.queue)
        try:
            self.queue = self.queue + self.input.read(102400)
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find("</event>")
            while index != -1 and self.queue.startswith("<event>"):
                event = pickle.loads(self.queue[7:index])
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find("</event>")
            index = self.queue.find("</exitcode>")
            while index != -1 and self.queue.startswith("<exitcode>"):
                task, status = pickle.loads(self.queue[10:index])
                self.rq.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find("</exitcode>")
        return (end > start)

    def close(self):
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()