runqueue.py

#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from bb import msg, data, event, mkdirhier, utils
from sets import Set
import bb, os, sys
import signal
import stat

class TaskFailure(Exception):
    """Exception raised when a task in a runqueue fails"""
    def __init__(self, x):
        self.args = x


class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self):
        self.completed = 0
        self.skipped = 0
        self.failed = 0

    def taskFailed(self):
        self.failed = self.failed + 1

    def taskCompleted(self, number = 1):
        self.completed = self.completed + number

    def taskSkipped(self, number = 1):
        self.skipped = self.skipped + number

class RunQueueScheduler:
    """
    Control the order tasks are scheduled in.
    """
    def __init__(self, runqueue):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        numTasks = len(self.rq.runq_fnid)

        self.prio_map = []
        self.prio_map.extend(range(numTasks))

    def next(self):
        """
        Return the id of the first task we find that is buildable
        """
        for task1 in range(len(self.rq.runq_fnid)):
            task = self.prio_map[task1]
            if self.rq.runq_running[task] == 1:
                continue
            if self.rq.runq_buildable[task] == 1:
                return task

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight;
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    def __init__(self, runqueue):
        """
        The priority map is sorted by task weight.
        """
        from copy import deepcopy

        self.rq = runqueue

        sortweight = deepcopy(self.rq.runq_weight)
        sortweight.sort()
        copyweight = deepcopy(self.rq.runq_weight)
        self.prio_map = []

        for weight in sortweight:
            idx = copyweight.index(weight)
            self.prio_map.append(idx)
            copyweight[idx] = -1
        self.prio_map.reverse()
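
        # Illustrative example (hypothetical values, not from the original
        # source): with runq_weight = [3, 1, 7], sortweight sorts to
        # [1, 3, 7], the loop above builds prio_map = [1, 0, 2], and the
        # final reverse() yields [2, 0, 1], so the heaviest task (id 2) is
        # offered to next() first.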

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so that once a
    given .bb file starts to build, it is completed as quickly as possible.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    def __init__(self, runqueue):
        RunQueueSchedulerSpeed.__init__(self, runqueue)
        from copy import deepcopy

        #FIXME - whilst this groups all fnids together it does not reorder the
        #fnid groups optimally.

        basemap = deepcopy(self.prio_map)
        self.prio_map = []
        while (len(basemap) > 0):
            entry = basemap.pop(0)
            self.prio_map.append(entry)
            fnid = self.rq.runq_fnid[entry]
            todel = []
            for entry in basemap:
                entry_fnid = self.rq.runq_fnid[entry]
                if entry_fnid == fnid:
                    todel.append(basemap.index(entry))
                    self.prio_map.append(entry)
            todel.reverse()
            for idx in todel:
                del basemap[idx]
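
        # Illustrative example (hypothetical task ids): if the speed-sorted
        # map is [t0, t1, t2], where t0 and t2 come from the same .bb file
        # (same fnid) and t1 from another, the grouping above produces
        # [t0, t2, t1], so all of the first file's tasks are scheduled
        # before the next file is started.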

class RunQueue:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):
        self.reset_runqueue()
        self.cooker = cooker
        self.dataCache = dataCache
        self.taskData = taskData
        self.targets = targets

        self.cfgdata = cfgData
        self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData, 1) or 1)
        self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split()
        self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed"
        self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"
        # Default to an empty string (not a list) so the .split() in
        # prepare_runqueue() works when BB_STAMP_WHITELIST is unset
        self.stampwhitelist = bb.data.getVar("BB_STAMP_WHITELIST", cfgData, 1) or ""

    def reset_runqueue(self):
        self.runq_fnid = []
        self.runq_task = []
        self.runq_depends = []
        self.runq_revdeps = []

    def get_user_idstring(self, task):
        fn = self.taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task]
        return "%s, %s" % (fn, taskname)

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain
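
        # Illustrative example (hypothetical ids): chain_reorder([7, 3, 5])
        # rotates the chain to start at its lowest id, giving [3, 5, 7], so
        # the same loop is never reported twice under different rotations.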

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep]))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs

    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list, finding tasks that it is not
        possible to execute due to circular dependencies.
        """
        numTasks = len(self.runq_fnid)
        weight = []
        deps_left = []
        task_done = []

        for listid in range(numTasks):
            task_done.append(False)
            weight.append(0)
            deps_left.append(len(self.runq_revdeps[listid]))

        for listid in endpoints:
            weight[listid] = 1
            task_done[listid] = True

        while 1:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    weight[revdep] = weight[revdep] + weight[listid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for task in range(numTasks):
            if task_done[task] is False or deps_left[task] != 0:
                problem_tasks.append(task)
                bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task)))
                bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task]))

        if problem_tasks:
            message = "Unbuildable tasks were found.\n"
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            bb.msg.error(bb.msg.domain.RunQueue, message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal(bb.msg.domain.RunQueue, message)

        return weight
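
    # Illustrative example (hypothetical graph, not from the original source):
    # with three tasks where tasks 1 and 2 both depend on task 0 and nothing
    # depends on 1 or 2, the endpoints are [1, 2] with weight 1 each; the
    # propagation loop above then gives task 0 a weight of 2, so it ends up
    # heavier than either endpoint and is preferred by the speed scheduler.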

    def prepare_runqueue(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """
        depends = []
        runq_build = []
        recursive_tdepends = {}

        taskData = self.taskData

        if len(taskData.tasks_name) == 0:
            # Nothing to do
            return

        bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue")

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every target
        # ordered by priority (build_targets, run_targets). It also gives
        # information on each of those providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        for task in range(len(taskData.tasks_name)):
            fnid = taskData.tasks_fnid[task]
            fn = taskData.fn_index[fnid]
            task_deps = self.dataCache.task_deps[fn]

            if fnid not in taskData.failed_fnids:

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                depends = taskData.tasks_tdepends[task]

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                    for depid in taskData.depids[fnid]:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        if depid in taskData.build_targets:
                            depdata = taskData.build_targets[depid][0]
                            if depdata is not None:
                                dep = taskData.fn_index[depdata]
                                for taskname in tasknames:
                                    depends.append(taskData.gettask_id(dep, taskname))

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                    taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
                    for depid in taskData.rdepids[fnid]:
                        if depid in taskData.run_targets:
                            depdata = taskData.run_targets[depid][0]
                            if depdata is not None:
                                dep = taskData.fn_index[depdata]
                                depends.append(taskData.gettask_id(dep, taskname))

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData.tasks_idepends[task]
                for (depid, idependtask) in idepends:
                    if depid in taskData.build_targets:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            depends.append(taskData.gettask_id(dep, idependtask))

                # Create a list of recursive dependent tasks (from tdepends) and cache
                def get_recursive_tdepends(task):
                    if not task:
                        return []
                    if task in recursive_tdepends:
                        return recursive_tdepends[task]

                    rectdepends = [task]
                    nextdeps = [task]
                    while len(nextdeps) != 0:
                        newdeps = []
                        for nextdep in nextdeps:
                            for tdepend in taskData.tasks_tdepends[nextdep]:
                                if tdepend not in rectdepends:
                                    rectdepends.append(tdepend)
                                    newdeps.append(tdepend)
                        nextdeps = newdeps
                    recursive_tdepends[task] = rectdepends
                    return rectdepends

                # Using the list of tdepends for this task create a list of
                # the recursive idepends we have
                def get_recursive_idepends(task):
                    if not task:
                        return []
                    rectdepends = get_recursive_tdepends(task)

                    recidepends = []
                    for tdepend in rectdepends:
                        for idepend in taskData.tasks_idepends[tdepend]:
                            recidepends.append(idepend)
                    return recidepends
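
                # Illustrative example (hypothetical task names): if
                # do_install was added with "addtask install after compile"
                # and do_compile after do_configure, get_recursive_tdepends()
                # for the do_install task id returns the ids of do_install,
                # do_compile and do_configure, and get_recursive_idepends()
                # collects any inter-task [depends] entries attached to those
                # three tasks.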

                def add_recursive_build(depid, depfnid):
                    """
                    Add build depends of depid to depends
                    (if we've not seen it before)
                    (calls itself recursively)
                    """
                    if str(depid) in dep_seen:
                        return
                    dep_seen.append(depid)
                    if depid in taskData.build_targets:
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            # Need to avoid creating new tasks here
                            taskid = taskData.gettask_id(dep, taskname, False)
                            if taskid is not None:
                                depends.append(taskid)
                                fnid = taskData.tasks_fnid[taskid]
                                #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                            else:
                                fnid = taskData.getfn_id(dep)
                            for nextdepid in taskData.depids[fnid]:
                                if nextdepid not in dep_seen:
                                    add_recursive_build(nextdepid, fnid)
                            for nextdepid in taskData.rdepids[fnid]:
                                if nextdepid not in rdep_seen:
                                    add_recursive_run(nextdepid, fnid)
                            for (idependid, idependtask) in get_recursive_idepends(taskid):
                                if idependid not in dep_seen:
                                    add_recursive_build(idependid, fnid)

                def add_recursive_run(rdepid, depfnid):
                    """
                    Add runtime depends of rdepid to depends
                    (if we've not seen it before)
                    (calls itself recursively)
                    """
                    if str(rdepid) in rdep_seen:
                        return
                    rdep_seen.append(rdepid)
                    if rdepid in taskData.run_targets:
                        depdata = taskData.run_targets[rdepid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            # Need to avoid creating new tasks here
                            taskid = taskData.gettask_id(dep, taskname, False)
                            if taskid is not None:
                                depends.append(taskid)
                                fnid = taskData.tasks_fnid[taskid]
                                #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                            else:
                                fnid = taskData.getfn_id(dep)
                            for nextdepid in taskData.depids[fnid]:
                                if nextdepid not in dep_seen:
                                    add_recursive_build(nextdepid, fnid)
                            for nextdepid in taskData.rdepids[fnid]:
                                if nextdepid not in rdep_seen:
                                    add_recursive_run(nextdepid, fnid)
                            for (idependid, idependtask) in get_recursive_idepends(taskid):
                                if idependid not in dep_seen:
                                    add_recursive_build(idependid, fnid)

                # Resolve recursive 'recrdeptask' dependencies
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                    for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split():
                        dep_seen = []
                        rdep_seen = []
                        idep_seen = []
                        for depid in taskData.depids[fnid]:
                            add_recursive_build(depid, fnid)
                        for rdepid in taskData.rdepids[fnid]:
                            add_recursive_run(rdepid, fnid)
                        deptaskid = taskData.gettask_id(fn, taskname, False)
                        for (idependid, idependtask) in get_recursive_idepends(deptaskid):
                            add_recursive_build(idependid, fnid)

                # Remove all self references
                if task in depends:
                    newdep = []
                    bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
                    for dep in depends:
                        if task != dep:
                            newdep.append(dep)
                    depends = newdep

            self.runq_fnid.append(taskData.tasks_fnid[task])
            self.runq_task.append(taskData.tasks_name[task])
            self.runq_depends.append(Set(depends))
            self.runq_revdeps.append(Set())

            runq_build.append(0)
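
        # At this point the runqueue is held as parallel lists indexed by
        # runqueue task id: runq_fnid (recipe file), runq_task (task name),
        # runq_depends (forward dependencies) and runq_revdeps (reverse
        # dependencies, filled in later in Step D).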

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")

        def mark_active(listid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """
            if runq_build[listid] == 1:
                return
            runq_build[listid] = 1
            depends = self.runq_depends[listid]
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_pairs = []
        for target in self.targets:
            targetid = taskData.getbuild_id(target[0])

            if targetid not in taskData.build_targets:
                continue

            if targetid in taskData.failed_deps:
                continue

            fnid = taskData.build_targets[targetid][0]
            fn = taskData.fn_index[fnid]
            self.target_pairs.append((fn, target[1]))

            # Remove stamps for targets if force mode active
            if self.cooker.configuration.force:
                bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn))
                bb.build.del_stamp(target[1], self.dataCache, fn)

            if fnid in taskData.failed_fnids:
                continue

            if target[1] not in taskData.tasks_lookup[fnid]:
                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0]))

            listid = taskData.tasks_lookup[fnid][target[1]]

            mark_active(listid, 1)

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        maps = []
        delcount = 0
        for listid in range(len(self.runq_fnid)):
            if runq_build[listid-delcount] == 1:
                maps.append(listid-delcount)
            else:
                del self.runq_fnid[listid-delcount]
                del self.runq_task[listid-delcount]
                del self.runq_depends[listid-delcount]
                del runq_build[listid-delcount]
                del self.runq_revdeps[listid-delcount]
                delcount = delcount + 1
                maps.append(-1)
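
        # Illustrative example (hypothetical values): with runq_build =
        # [1, 0, 1], the loop above deletes the middle task and leaves
        # maps = [0, -1, 1], i.e. old task 2 is now runqueue index 1; the
        # remapping below rewrites every dependency through this table.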

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runq_fnid) == 0:
            if not taskData.abort:
                bb.msg.fatal(bb.msg.domain.RunQueue, "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.")

        bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))

        # Remap the dependencies to account for the deleted tasks
        # Check we didn't delete a task we depend on
        for listid in range(len(self.runq_fnid)):
            newdeps = []
            origdeps = self.runq_depends[listid]
            for origdep in origdeps:
                if maps[origdep] == -1:
                    bb.msg.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!")
                newdeps.append(maps[origdep])
            self.runq_depends[listid] = Set(newdeps)

        bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")

        # Generate a list of reverse dependencies to ease future calculations
        for listid in range(len(self.runq_fnid)):
            for dep in self.runq_depends[listid]:
                self.runq_revdeps[dep].add(listid)

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for listid in range(len(self.runq_fnid)):
            revdeps = self.runq_revdeps[listid]
            if len(revdeps) == 0:
                endpoints.append(listid)
            for dep in revdeps:
                if dep in self.runq_depends[listid]:
                    #self.dump_data(taskData)
                    bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))

        bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))

        # Calculate task weights
        # Check for higher-length circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)

        # Decide what order to execute the tasks in, pick a scheduler
        #self.sched = RunQueueScheduler(self)
        if self.scheduler == "completion":
            self.sched = RunQueueSchedulerCompletion(self)
        else:
            self.sched = RunQueueSchedulerSpeed(self)

        # Sanity Check - Check for multiple tasks building the same provider
        prov_list = {}
        seen_fn = []
        for task in range(len(self.runq_fnid)):
            fn = taskData.fn_index[self.runq_fnid[task]]
            if fn in seen_fn:
                continue
            seen_fn.append(fn)
            for prov in self.dataCache.fn_provides[fn]:
                if prov not in prov_list:
                    prov_list[prov] = [fn]
                elif fn not in prov_list[prov]:
                    prov_list[prov].append(fn)
        error = False
        for prov in prov_list:
            if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
                error = True
                bb.msg.error(bb.msg.domain.RunQueue, "Multiple .bb files are due to be built which each provide %s (%s).\n This usually means one provides something the other doesn't and should." % (prov, " ".join(prov_list[prov])))
        #if error:
        #    bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...")

        # Create a whitelist usable by the stamp checks
        stampfnwhitelist = []
        for entry in self.stampwhitelist.split():
            entryid = self.taskData.getbuild_id(entry)
            if entryid not in self.taskData.build_targets:
                continue
            fnid = self.taskData.build_targets[entryid][0]
            fn = self.taskData.fn_index[fnid]
            stampfnwhitelist.append(fn)
        self.stampfnwhitelist = stampfnwhitelist

        #self.dump_data(taskData)

    def check_stamps(self):
        unchecked = {}
        current = []
        notcurrent = []
        buildable = []

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.stampfnwhitelist

        for task in range(len(self.runq_fnid)):
            unchecked[task] = ""
            if len(self.runq_depends[task]) == 0:
                buildable.append(task)

        def check_buildable(self, task, buildable):
            for revdep in self.runq_revdeps[task]:
                alldeps = 1
                for dep in self.runq_depends[revdep]:
                    if dep in unchecked:
                        alldeps = 0
                if alldeps == 1:
                    if revdep in unchecked:
                        buildable.append(revdep)

        for task in range(len(self.runq_fnid)):
            if task not in unchecked:
                continue
            fn = self.taskData.fn_index[self.runq_fnid[task]]
            taskname = self.runq_task[task]
            stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
            # If the stamp is missing, it's not current
            if not os.access(stampfile, os.F_OK):
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue
            # If it's a 'nostamp' task, it's not current
            taskdep = self.dataCache.task_deps[fn]
            if 'nostamp' in taskdep and task in taskdep['nostamp']:
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue

        while (len(buildable) > 0):
            nextbuildable = []
            for task in buildable:
                if task in unchecked:
                    fn = self.taskData.fn_index[self.runq_fnid[task]]
                    taskname = self.runq_task[task]
                    stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
                    iscurrent = True

                    t1 = os.stat(stampfile)[stat.ST_MTIME]
                    for dep in self.runq_depends[task]:
                        if iscurrent:
                            fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
                            taskname2 = self.runq_task[dep]
                            stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
                            if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                                if dep in notcurrent:
                                    iscurrent = False
                                else:
                                    t2 = os.stat(stampfile2)[stat.ST_MTIME]
                                    if t1 < t2:
                                        iscurrent = False
                    del unchecked[task]
                    if iscurrent:
                        current.append(task)
                    else:
                        notcurrent.append(task)

                    check_buildable(self, task, nextbuildable)

            buildable = nextbuildable

        #for task in range(len(self.runq_fnid)):
        #    fn = self.taskData.fn_index[self.runq_fnid[task]]
        #    taskname = self.runq_task[task]
        #    print "%s %s.%s" % (task, taskname, fn)

        #print "Unchecked: %s" % unchecked
        #print "Current: %s" % current
        #print "Not current: %s" % notcurrent

        if len(unchecked) > 0:
            bb.fatal("check_stamps fatal internal error")
        return current

    def check_stamp(self, task):
        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.stampfnwhitelist

        fn = self.taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task]
        stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.dataCache.task_deps[fn]
        if 'nostamp' in taskdep and task in taskdep['nostamp']:
            return False

        iscurrent = True
        t1 = os.stat(stampfile)[stat.ST_MTIME]
        for dep in self.runq_depends[task]:
            if iscurrent:
                fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
                taskname2 = self.runq_task[dep]
                stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    try:
                        t2 = os.stat(stampfile2)[stat.ST_MTIME]
                        if t1 < t2:
                            iscurrent = False
                    except:
                        iscurrent = False

        return iscurrent
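
    # In other words, a task's stamp is only treated as current if it exists,
    # the task is not marked 'nostamp', and it is at least as new as the
    # stamps of the dependencies it is compared against (same recipe file,
    # or any non-whitelisted file when BB_STAMP_POLICY requests a full
    # dependency tree check).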

    def execute_runqueue(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)
        """
        failures = 0
        while 1:
            failed_fnids = []
            try:
                self.execute_runqueue_internal()
            finally:
                if self.master_process:
                    failed_fnids = self.finish_runqueue()
            if len(failed_fnids) == 0:
                return failures
            if self.taskData.abort:
                raise bb.runqueue.TaskFailure(failed_fnids)
            for fnid in failed_fnids:
                #print "Failure: %s %s %s" % (fnid, self.taskData.fn_index[fnid], self.runq_task[fnid])
                self.taskData.fail_fnid(fnid)
                failures = failures + 1
            self.reset_runqueue()
            self.prepare_runqueue()
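
    # Recovery loop behaviour: when abort-on-failure is not set, each failed
    # recipe's fnid is marked as failed in taskData so that alternative
    # providers (if any) can be selected, and the runqueue is then rebuilt
    # and executed again.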

    def execute_runqueue_initVars(self):
        self.stats = RunQueueStats()

        self.active_builds = 0
        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []
        self.build_pids = {}
        self.failed_fnids = []
        self.master_process = True

        # Mark initial buildable tasks
        for task in range(len(self.runq_fnid)):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)

    def task_complete(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.runq_buildable[revdep] = 1
                fn = self.taskData.fn_index[self.runq_fnid[revdep]]
                taskname = self.runq_task[revdep]
                bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))

    def execute_runqueue_internal(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """
        bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")

        self.execute_runqueue_initVars()

        if len(self.runq_fnid) == 0:
            # nothing to do
            return []

        def sigint_handler(signum, frame):
            raise KeyboardInterrupt

        event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp, self.cfgdata))

        while True:
            task = self.sched.next()
            if task is not None:
                fn = self.taskData.fn_index[self.runq_fnid[task]]
                taskname = self.runq_task[task]

                if self.check_stamp(task):
                    bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
                    self.runq_running[task] = 1
                    self.task_complete(task)
                    self.stats.taskCompleted()
                    self.stats.taskSkipped()
                    continue

                bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task)))
                try:
                    pid = os.fork()
                except OSError, e:
                    bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
                if pid == 0:
                    # Bypass master process' handling
                    self.master_process = False
                    # Stop Ctrl+C being sent to children
                    # signal.signal(signal.SIGINT, signal.SIG_IGN)
                    # Make the child the process group leader
                    os.setpgid(0, 0)
                    newsi = os.open('/dev/null', os.O_RDWR)
                    os.dup2(newsi, sys.stdin.fileno())
                    self.cooker.configuration.cmd = taskname[3:]
                    try:
                        self.cooker.tryBuild(fn)
                    except bb.build.EventException:
                        bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
                        sys.exit(1)
                    except:
                        bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
                        raise
                    sys.exit(0)
                self.build_pids[pid] = task
                self.runq_running[task] = 1
                self.active_builds = self.active_builds + 1
                if self.active_builds < self.number_tasks:
                    continue
            if self.active_builds > 0:
                result = os.waitpid(-1, 0)
                self.active_builds = self.active_builds - 1
                task = self.build_pids[result[0]]
                if result[1] != 0:
                    del self.build_pids[result[0]]
                    bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
                    self.failed_fnids.append(self.runq_fnid[task])
                    self.stats.taskFailed()
                    break
                self.task_complete(task)
                self.stats.taskCompleted()
                del self.build_pids[result[0]]
                continue
            return
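
    # Execution model, as implemented above: each runnable task is executed
    # in a forked child process that re-invokes cooker.tryBuild() for the
    # task's recipe; the parent tracks children in build_pids, limits
    # concurrency to BB_NUMBER_THREADS via active_builds, and reaps children
    # with os.waitpid(), marking tasks complete or failed from the exit code.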

    def finish_runqueue(self):
        try:
            while self.active_builds > 0:
                bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.active_builds)
                tasknum = 1
                for k, v in self.build_pids.iteritems():
                    bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k))
                    tasknum = tasknum + 1
                result = os.waitpid(-1, 0)
                task = self.build_pids[result[0]]
                if result[1] != 0:
                    bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
                    self.failed_fnids.append(self.runq_fnid[task])
                    self.stats.taskFailed()
                del self.build_pids[result[0]]
                self.active_builds = self.active_builds - 1
            bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
            return self.failed_fnids
        except KeyboardInterrupt:
            bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.active_builds)
            for k, v in self.build_pids.iteritems():
                try:
                    os.kill(-k, signal.SIGINT)
                except:
                    pass
            raise

        # Sanity Checks
        for task in range(len(self.runq_fnid)):
            if self.runq_buildable[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
            if self.runq_running[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
            if self.runq_complete[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)

        bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
        return self.failed_fnids

    def dump_data(self, taskQueue):
        """
        Dump some debug information on the internal data structures
        """
        bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:")
        for task in range(len(self.runq_fnid)):
            bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
                taskQueue.fn_index[self.runq_fnid[task]],
                self.runq_task[task],
                self.runq_weight[task],
                self.runq_depends[task],
                self.runq_revdeps[task]))

        bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:")
        for task1 in range(len(self.runq_fnid)):
            if task1 in self.prio_map:
                task = self.prio_map[task1]
                bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
                    taskQueue.fn_index[self.runq_fnid[task]],
                    self.runq_task[task],
                    self.runq_weight[task],
                    self.runq_depends[task],
                    self.runq_revdeps[task]))