#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from bb import msg, data, event, mkdirhier, utils
from sets import Set
import bb, os, sys
import signal
import stat

class TaskFailure(Exception):
    """Exception raised when a task in a runqueue fails"""
    def __init__(self, x):
        self.args = x

class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self):
        self.completed = 0
        self.skipped = 0
        self.failed = 0

    def taskFailed(self):
        self.failed = self.failed + 1

    def taskCompleted(self, number = 1):
        self.completed = self.completed + number

    def taskSkipped(self, number = 1):
        self.skipped = self.skipped + number

class RunQueueScheduler:
    """
    Control the order tasks are scheduled in.
    """
    def __init__(self, runqueue):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        numTasks = len(self.rq.runq_fnid)

        self.prio_map = []
        self.prio_map.extend(range(numTasks))

    def next(self):
        """
        Return the id of the first task we find that is buildable
        """
        for task1 in range(len(self.rq.runq_fnid)):
            task = self.prio_map[task1]
            if self.rq.runq_running[task] == 1:
                continue
            if self.rq.runq_buildable[task] == 1:
                return task
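        # Falling off the end of the loop returns None, which the run queue
        # takes to mean nothing is currently buildable.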

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    def __init__(self, runqueue):
        """
        The priority map is sorted by task weight.
        """
        from copy import deepcopy

        self.rq = runqueue

        sortweight = deepcopy(self.rq.runq_weight)
        sortweight.sort()
        copyweight = deepcopy(self.rq.runq_weight)
        self.prio_map = []

        for weight in sortweight:
            idx = copyweight.index(weight)
            self.prio_map.append(idx)
            copyweight[idx] = -1
        self.prio_map.reverse()
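        # For example, runq_weight = [3, 1, 7] builds prio_map = [1, 0, 2]
        # (lightest first); after the reverse() it is [2, 0, 1], so the
        # heaviest task (index 2) is offered to the run queue first.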

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so that once a
    given .bb file starts to build, it is completed as quickly as possible.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    def __init__(self, runqueue):
        RunQueueSchedulerSpeed.__init__(self, runqueue)
        from copy import deepcopy

        #FIXME - whilst this groups all fnids together it does not reorder the
        #fnid groups optimally.

        basemap = deepcopy(self.prio_map)
        self.prio_map = []
        while (len(basemap) > 0):
            entry = basemap.pop(0)
            self.prio_map.append(entry)
            fnid = self.rq.runq_fnid[entry]
            todel = []
            for entry in basemap:
                entry_fnid = self.rq.runq_fnid[entry]
                if entry_fnid == fnid:
                    todel.append(basemap.index(entry))
                    self.prio_map.append(entry)
            todel.reverse()
            for idx in todel:
                del basemap[idx]
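
# The scheduler used for a build is picked in RunQueue.prepare_runqueue() from
# the BB_SCHEDULER variable: "completion" selects RunQueueSchedulerCompletion,
# anything else falls back to the default RunQueueSchedulerSpeed ("speed").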

class RunQueue:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):
        self.reset_runqueue()
        self.cooker = cooker
        self.dataCache = dataCache
        self.taskData = taskData
        self.targets = targets
        self.cfgdata = cfgData

        self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData, 1) or 1)
        self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split()
        self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed"
        self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"
        self.stampwhitelist = bb.data.getVar("BB_STAMP_WHITELIST", cfgData, 1) or ""

    def reset_runqueue(self):
        self.runq_fnid = []
        self.runq_task = []
        self.runq_depends = []
        self.runq_revdeps = []

    def get_user_idstring(self, task):
        fn = self.taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task]
        return "%s, %s" % (fn, taskname)

    def get_task_id(self, fnid, taskname):
        for listid in range(len(self.runq_fnid)):
            if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname:
                return listid
        return None

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep]))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs

    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list, finding tasks that are
        impossible to execute due to circular dependencies.
        """
        numTasks = len(self.runq_fnid)
        weight = []
        deps_left = []
        task_done = []

        for listid in range(numTasks):
            task_done.append(False)
            weight.append(0)
            deps_left.append(len(self.runq_revdeps[listid]))

        for listid in endpoints:
            weight[listid] = 1
            task_done[listid] = True

        while 1:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    weight[revdep] = weight[revdep] + weight[listid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for task in range(numTasks):
            if task_done[task] is False or deps_left[task] != 0:
                problem_tasks.append(task)
                bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task)))
                bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task]))

        if problem_tasks:
            message = "Unbuildable tasks were found.\n"
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            bb.msg.error(bb.msg.domain.RunQueue, message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal(bb.msg.domain.RunQueue, message)

        return weight

    def prepare_runqueue(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        depends = []
        runq_build = []
        recursive_tdepends = {}

        taskData = self.taskData

        if len(taskData.tasks_name) == 0:
            # Nothing to do
            return

        bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue")

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every target
        # ordered by priority (build_targets, run_targets). It also gives
        # information on each of those providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        for task in range(len(taskData.tasks_name)):
            fnid = taskData.tasks_fnid[task]
            fn = taskData.fn_index[fnid]
            task_deps = self.dataCache.task_deps[fn]

            if fnid not in taskData.failed_fnids:

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                depends = taskData.tasks_tdepends[task]

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                    for depid in taskData.depids[fnid]:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        if depid in taskData.build_targets:
                            depdata = taskData.build_targets[depid][0]
                            if depdata is not None:
                                dep = taskData.fn_index[depdata]
                                for taskname in tasknames:
                                    depends.append(taskData.gettask_id(dep, taskname))

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                    taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
                    for depid in taskData.rdepids[fnid]:
                        if depid in taskData.run_targets:
                            depdata = taskData.run_targets[depid][0]
                            if depdata is not None:
                                dep = taskData.fn_index[depdata]
                                depends.append(taskData.gettask_id(dep, taskname))

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData.tasks_idepends[task]
                for (depid, idependtask) in idepends:
                    if depid in taskData.build_targets:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            depends.append(taskData.gettask_id(dep, idependtask))

                # Create a list of recursive dependent tasks (from tdepends) and cache
                def get_recursive_tdepends(task):
                    if not task:
                        return []
                    if task in recursive_tdepends:
                        return recursive_tdepends[task]

                    fnid = taskData.tasks_fnid[task]
                    taskids = taskData.gettask_ids(fnid)

                    rectdepends = taskids
                    nextdeps = taskids
                    while len(nextdeps) != 0:
                        newdeps = []
                        for nextdep in nextdeps:
                            for tdepend in taskData.tasks_tdepends[nextdep]:
                                if tdepend not in rectdepends:
                                    rectdepends.append(tdepend)
                                    newdeps.append(tdepend)
                        nextdeps = newdeps
                    recursive_tdepends[task] = rectdepends
                    return rectdepends
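
                # Note: recursive_tdepends (initialised before the main loop)
                # memoises the breadth-first expansion above, so each task id is
                # only walked once per prepare_runqueue() run.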

                # Using the list of tdepends for this task create a list of
                # the recursive idepends we have
                def get_recursive_idepends(task):
                    if not task:
                        return []
                    rectdepends = get_recursive_tdepends(task)

                    recidepends = []
                    for tdepend in rectdepends:
                        for idepend in taskData.tasks_idepends[tdepend]:
                            recidepends.append(idepend)
                    return recidepends

                def add_recursive_build(depid, depfnid):
                    """
                    Add build depends of depid to depends
                    (if we've not seen it before)
                    (calls itself recursively)
                    """
                    if depid in dep_seen:
                        return
                    dep_seen.append(depid)
                    if depid in taskData.build_targets:
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            # Need to avoid creating new tasks here
                            taskid = taskData.gettask_id(dep, taskname, False)
                            if taskid is not None:
                                depends.append(taskid)
                                fnid = taskData.tasks_fnid[taskid]
                                #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                            else:
                                fnid = taskData.getfn_id(dep)
                            for nextdepid in taskData.depids[fnid]:
                                if nextdepid not in dep_seen:
                                    add_recursive_build(nextdepid, fnid)
                            for nextdepid in taskData.rdepids[fnid]:
                                if nextdepid not in rdep_seen:
                                    add_recursive_run(nextdepid, fnid)
                            for (idependid, idependtask) in get_recursive_idepends(taskid):
                                if idependid not in dep_seen:
                                    add_recursive_build(idependid, fnid)

                def add_recursive_run(rdepid, depfnid):
                    """
                    Add runtime depends of rdepid to depends
                    (if we've not seen it before)
                    (calls itself recursively)
                    """
                    if rdepid in rdep_seen:
                        return
                    rdep_seen.append(rdepid)
                    if rdepid in taskData.run_targets:
                        depdata = taskData.run_targets[rdepid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            # Need to avoid creating new tasks here
                            taskid = taskData.gettask_id(dep, taskname, False)
                            if taskid is not None:
                                depends.append(taskid)
                                fnid = taskData.tasks_fnid[taskid]
                                #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                            else:
                                fnid = taskData.getfn_id(dep)
                            for nextdepid in taskData.depids[fnid]:
                                if nextdepid not in dep_seen:
                                    add_recursive_build(nextdepid, fnid)
                            for nextdepid in taskData.rdepids[fnid]:
                                if nextdepid not in rdep_seen:
                                    add_recursive_run(nextdepid, fnid)
                            for (idependid, idependtask) in get_recursive_idepends(taskid):
                                if idependid not in dep_seen:
                                    add_recursive_build(idependid, fnid)
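
                # add_recursive_build() and add_recursive_run() recurse into each
                # other; the dep_seen/rdep_seen lists (reset per taskname in the
                # recrdeptask handling below) stop them revisiting ids and looping
                # on circular metadata.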

                # Resolve recursive 'recrdeptask' dependencies
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                    for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split():
                        dep_seen = []
                        rdep_seen = []
                        idep_seen = []
                        for depid in taskData.depids[fnid]:
                            add_recursive_build(depid, fnid)
                        for rdepid in taskData.rdepids[fnid]:
                            add_recursive_run(rdepid, fnid)
                        deptaskid = taskData.gettask_id(fn, taskname, False)
                        for (idependid, idependtask) in get_recursive_idepends(deptaskid):
                            add_recursive_build(idependid, fnid)

                # Remove all self references
                if task in depends:
                    newdep = []
                    bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
                    for dep in depends:
                        if task != dep:
                            newdep.append(dep)
                    depends = newdep

            self.runq_fnid.append(taskData.tasks_fnid[task])
            self.runq_task.append(taskData.tasks_name[task])
            self.runq_depends.append(Set(depends))
            self.runq_revdeps.append(Set())

            runq_build.append(0)

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")

        def mark_active(listid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """
            if runq_build[listid] == 1:
                return
            runq_build[listid] = 1
            depends = self.runq_depends[listid]
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_pairs = []
        for target in self.targets:
            targetid = taskData.getbuild_id(target[0])

            if targetid not in taskData.build_targets:
                continue

            if targetid in taskData.failed_deps:
                continue

            fnid = taskData.build_targets[targetid][0]
            fn = taskData.fn_index[fnid]
            self.target_pairs.append((fn, target[1]))

            # Remove stamps for targets if force mode active
            if self.cooker.configuration.force:
                bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn))
                bb.build.del_stamp(target[1], self.dataCache, fn)

            if fnid in taskData.failed_fnids:
                continue

            if target[1] not in taskData.tasks_lookup[fnid]:
                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0]))

            listid = taskData.tasks_lookup[fnid][target[1]]

            mark_active(listid, 1)

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        maps = []
        delcount = 0
        for listid in range(len(self.runq_fnid)):
            if runq_build[listid-delcount] == 1:
                maps.append(listid-delcount)
            else:
                del self.runq_fnid[listid-delcount]
                del self.runq_task[listid-delcount]
                del self.runq_depends[listid-delcount]
                del runq_build[listid-delcount]
                del self.runq_revdeps[listid-delcount]
                delcount = delcount + 1
                maps.append(-1)
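
        # maps[] now translates an original task index to its index after the
        # pruning (-1 marks a deleted task); Step D uses it to remap runq_depends.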

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runq_fnid) == 0:
            if not taskData.abort:
                bb.msg.fatal(bb.msg.domain.RunQueue, "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.")

        bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))

        # Remap the dependencies to account for the deleted tasks
        # Check we didn't delete a task we depend on
        for listid in range(len(self.runq_fnid)):
            newdeps = []
            origdeps = self.runq_depends[listid]
            for origdep in origdeps:
                if maps[origdep] == -1:
                    bb.msg.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!")
                newdeps.append(maps[origdep])
            self.runq_depends[listid] = Set(newdeps)

        bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")

        # Generate a list of reverse dependencies to ease future calculations
        for listid in range(len(self.runq_fnid)):
            for dep in self.runq_depends[listid]:
                self.runq_revdeps[dep].add(listid)

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for listid in range(len(self.runq_fnid)):
            revdeps = self.runq_revdeps[listid]
            if len(revdeps) == 0:
                endpoints.append(listid)
            for dep in revdeps:
                if dep in self.runq_depends[listid]:
                    #self.dump_data(taskData)
                    bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))

        bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))

        # Calculate task weights
        # Check for higher length circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)

        # Decide what order to execute the tasks in, pick a scheduler
        #self.sched = RunQueueScheduler(self)
        if self.scheduler == "completion":
            self.sched = RunQueueSchedulerCompletion(self)
        else:
            self.sched = RunQueueSchedulerSpeed(self)

        # Sanity Check - Check for multiple tasks building the same provider
        prov_list = {}
        seen_fn = []
        for task in range(len(self.runq_fnid)):
            fn = taskData.fn_index[self.runq_fnid[task]]
            if fn in seen_fn:
                continue
            seen_fn.append(fn)
            for prov in self.dataCache.fn_provides[fn]:
                if prov not in prov_list:
                    prov_list[prov] = [fn]
                elif fn not in prov_list[prov]:
                    prov_list[prov].append(fn)
        error = False
        for prov in prov_list:
            if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
                error = True
                bb.msg.error(bb.msg.domain.RunQueue, "Multiple .bb files are due to be built which each provide %s (%s).\n This usually means one provides something the other doesn't and should." % (prov, " ".join(prov_list[prov])))
        #if error:
        #    bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...")

        # Create a whitelist usable by the stamp checks
        stampfnwhitelist = []
        for entry in self.stampwhitelist.split():
            entryid = self.taskData.getbuild_id(entry)
            if entryid not in self.taskData.build_targets:
                continue
            fnid = self.taskData.build_targets[entryid][0]
            fn = self.taskData.fn_index[fnid]
            stampfnwhitelist.append(fn)
        self.stampfnwhitelist = stampfnwhitelist

        #self.dump_data(taskData)

    def check_stamps(self):
        unchecked = {}
        current = []
        notcurrent = []
        buildable = []

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.stampfnwhitelist

        for task in range(len(self.runq_fnid)):
            unchecked[task] = ""
            if len(self.runq_depends[task]) == 0:
                buildable.append(task)

        def check_buildable(self, task, buildable):
            for revdep in self.runq_revdeps[task]:
                alldeps = 1
                for dep in self.runq_depends[revdep]:
                    if dep in unchecked:
                        alldeps = 0
                if alldeps == 1:
                    if revdep in unchecked:
                        buildable.append(revdep)

        for task in range(len(self.runq_fnid)):
            if task not in unchecked:
                continue
            fn = self.taskData.fn_index[self.runq_fnid[task]]
            taskname = self.runq_task[task]
            stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
            # If the stamp is missing it's not current
            if not os.access(stampfile, os.F_OK):
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue
            # If it's a 'nostamp' task, it's not current
            taskdep = self.dataCache.task_deps[fn]
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue

        while (len(buildable) > 0):
            nextbuildable = []
            for task in buildable:
                if task in unchecked:
                    fn = self.taskData.fn_index[self.runq_fnid[task]]
                    taskname = self.runq_task[task]
                    stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
                    iscurrent = True

                    t1 = os.stat(stampfile)[stat.ST_MTIME]
                    for dep in self.runq_depends[task]:
                        if iscurrent:
                            fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
                            taskname2 = self.runq_task[dep]
                            stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
                            if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                                if dep in notcurrent:
                                    iscurrent = False
                                else:
                                    t2 = os.stat(stampfile2)[stat.ST_MTIME]
                                    if t1 < t2:
                                        iscurrent = False
                    del unchecked[task]
                    if iscurrent:
                        current.append(task)
                    else:
                        notcurrent.append(task)

                check_buildable(self, task, nextbuildable)

            buildable = nextbuildable

        #for task in range(len(self.runq_fnid)):
        #    fn = self.taskData.fn_index[self.runq_fnid[task]]
        #    taskname = self.runq_task[task]
        #    print "%s %s.%s" % (task, taskname, fn)

        #print "Unchecked: %s" % unchecked
        #print "Current: %s" % current
        #print "Not current: %s" % notcurrent

        if len(unchecked) > 0:
            bb.fatal("check_stamps fatal internal error")
        return current

    def check_stamp_task(self, task):

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True

        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.stampfnwhitelist
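
        # "perfile" only compares stamps belonging to the same .bb file; any
        # other policy walks the full dependency tree, skipping files on the
        # stamp whitelist when the "whitelist" policy is in use.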

        fn = self.taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task]
        stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)

        # If the stamp is missing it's not current
        if not os.access(stampfile, os.F_OK):
            bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s not available\n" % stampfile)
            return False

        # If it's a 'nostamp' task, it's not current
        taskdep = self.dataCache.task_deps[fn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            bb.msg.debug(2, bb.msg.domain.RunQueue, "%s.%s is nostamp\n" % (fn, taskname))
            return False

        iscurrent = True
        t1 = os.stat(stampfile)[stat.ST_MTIME]
        for dep in self.runq_depends[task]:
            if iscurrent:
                fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
                taskname2 = self.runq_task[dep]
                stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    try:
                        t2 = os.stat(stampfile2)[stat.ST_MTIME]
                        if t1 < t2:
                            bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2))
                            iscurrent = False
                    except:
                        bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2, stampfile))
                        iscurrent = False

        return iscurrent

    def execute_runqueue(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)
        """

        failures = 0
        while 1:
            failed_fnids = []
            try:
                self.execute_runqueue_internal()
            finally:
                if self.master_process:
                    failed_fnids = self.finish_runqueue()
            if len(failed_fnids) == 0:
                return failures
            if self.taskData.abort:
                raise bb.runqueue.TaskFailure(failed_fnids)
            for fnid in failed_fnids:
                #print "Failure: %s %s %s" % (fnid, self.taskData.fn_index[fnid], self.runq_task[fnid])
                self.taskData.fail_fnid(fnid)
                failures = failures + 1
            self.reset_runqueue()
            self.prepare_runqueue()

    def execute_runqueue_initVars(self):

        self.stats = RunQueueStats()

        self.active_builds = 0
        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []
        self.build_pids = {}
        self.failed_fnids = []
        self.master_process = True

        # Mark initial buildable tasks
        for task in range(len(self.runq_fnid)):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)

    def task_complete(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.runq_buildable[revdep] = 1
                fn = self.taskData.fn_index[self.runq_fnid[revdep]]
                taskname = self.runq_task[revdep]
                bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))

    def execute_runqueue_internal(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")

        self.execute_runqueue_initVars()

        if len(self.runq_fnid) == 0:
            # nothing to do
            return []

        def sigint_handler(signum, frame):
            raise KeyboardInterrupt

        event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp, self.cfgdata))

        while True:
            task = self.sched.next()
            if task is not None:
                fn = self.taskData.fn_index[self.runq_fnid[task]]
                taskname = self.runq_task[task]

                if self.check_stamp_task(task):
                    bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
                    self.runq_running[task] = 1
                    self.task_complete(task)
                    self.stats.taskCompleted()
                    self.stats.taskSkipped()
                    continue

                bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task)))
                sys.stdout.flush()
                sys.stderr.flush()
                try:
                    pid = os.fork()
                except OSError, e:
                    bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
                if pid == 0:
                    # Bypass master process' handling
                    self.master_process = False
                    # Stop Ctrl+C being sent to children
                    # signal.signal(signal.SIGINT, signal.SIG_IGN)
                    # Make the child the process group leader
                    os.setpgid(0, 0)
                    newsi = os.open('/dev/null', os.O_RDWR)
                    os.dup2(newsi, sys.stdin.fileno())
                    self.cooker.configuration.cmd = taskname[3:]
                    bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data)
                    try:
                        self.cooker.tryBuild(fn)
                    except bb.build.EventException:
                        bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
                        sys.exit(1)
                    except:
                        bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
                        raise
                    sys.exit(0)

                self.build_pids[pid] = task
                self.runq_running[task] = 1
                self.active_builds = self.active_builds + 1

                if self.active_builds < self.number_tasks:
                    continue
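
            # At this point either the BB_NUMBER_THREADS limit has been reached
            # or nothing else is currently buildable, so reap one finished child
            # before scheduling again.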
            if self.active_builds > 0:
                result = os.waitpid(-1, 0)
                self.active_builds = self.active_builds - 1
                task = self.build_pids[result[0]]
                if result[1] != 0:
                    del self.build_pids[result[0]]
                    bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
                    self.failed_fnids.append(self.runq_fnid[task])
                    self.stats.taskFailed()
                    break
                self.task_complete(task)
                self.stats.taskCompleted()
                del self.build_pids[result[0]]
                continue

            return

    def finish_runqueue(self):
        try:
            while self.active_builds > 0:
                bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.active_builds)
                tasknum = 1
                for k, v in self.build_pids.iteritems():
                    bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k))
                    tasknum = tasknum + 1
                result = os.waitpid(-1, 0)
                task = self.build_pids[result[0]]
                if result[1] != 0:
                    bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
                    self.failed_fnids.append(self.runq_fnid[task])
                    self.stats.taskFailed()
                del self.build_pids[result[0]]
                self.active_builds = self.active_builds - 1
            bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
            return self.failed_fnids
        except KeyboardInterrupt:
            bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.active_builds)
            for k, v in self.build_pids.iteritems():
                try:
                    os.kill(-k, signal.SIGINT)
                except:
                    pass
            raise

        # Sanity Checks
        for task in range(len(self.runq_fnid)):
            if self.runq_buildable[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
            if self.runq_running[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
            if self.runq_complete[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)

        bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))

        return self.failed_fnids

    def dump_data(self, taskQueue):
        """
        Dump some debug information on the internal data structures
        """
        bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:")
        for task in range(len(self.runq_fnid)):
            bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
                taskQueue.fn_index[self.runq_fnid[task]],
                self.runq_task[task],
                self.runq_weight[task],
                self.runq_depends[task],
                self.runq_revdeps[task]))

        bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:")
        for task1 in range(len(self.runq_fnid)):
            if task1 in self.prio_map:
                task = self.prio_map[task1]
                bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
                    taskQueue.fn_index[self.runq_fnid[task]],
                    self.runq_task[task],
                    self.runq_weight[task],
                    self.runq_depends[task],
                    self.runq_revdeps[task]))

def check_stamp_fn(fn, taskname, d):
    rq = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", d)
    fnid = rq.taskData.getfn_id(fn)
    taskid = rq.get_task_id(fnid, taskname)
    if taskid is not None:
        return rq.check_stamp_task(taskid)
    return None
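
# check_stamp_fn() retrieves the RunQueue instance that execute_runqueue_internal()
# stores in __RUNQUEUE_DO_NOT_USE_EXTERNALLY in the forked child and answers a
# stamp-currency query for a single fn/taskname pair (None if the task is not in
# the queue).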