@@ -51,6 +51,88 @@ class RunQueueStats:
     def taskSkipped(self):
         self.skipped = self.skipped + 1

+class RunQueueScheduler:
+    """
+    Control the order tasks are scheduled in.
+    """
+    def __init__(self, runqueue):
+        """
+        The default scheduler just returns the first buildable task (the
+        priority map is sorted by task number)
+        """
+        self.rq = runqueue
+        numTasks = len(self.rq.runq_fnid)
+
+        self.prio_map = []
+        self.prio_map.extend(range(numTasks))
+
+    def next(self):
+        """
+        Return the id of the first task we find that is buildable
+        """
+        for task1 in range(len(self.rq.runq_fnid)):
+            task = self.prio_map[task1]
+            if self.rq.runq_running[task] == 1:
+                continue
+            if self.rq.runq_buildable[task] == 1:
+                return task
+
+class RunQueueSchedulerSpeed(RunQueueScheduler):
+    """
+    A scheduler optimised for speed. The priority map is sorted by task weight;
+    heavier weighted tasks (tasks needed by the most other tasks) are run first.
+    """
+    def __init__(self, runqueue):
+        """
+        The priority map is sorted by task weight.
+        """
+        from copy import deepcopy
+
+        self.rq = runqueue
+
+        sortweight = deepcopy(self.rq.runq_weight)
+        sortweight.sort()
+        copyweight = deepcopy(self.rq.runq_weight)
+        self.prio_map = []
+
+        for weight in sortweight:
+            idx = copyweight.index(weight)
+            self.prio_map.append(idx)
+            copyweight[idx] = -1
+
+        self.prio_map.reverse()
+
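To illustrate the ordering this produces, here is a minimal standalone sketch of the weight-sorted priority map built above. The weight values and task count are made up for illustration; they are not part of the patch.

    # Standalone sketch of the prio_map construction in RunQueueSchedulerSpeed
    # (hypothetical weights, index == task id).
    from copy import deepcopy

    runq_weight = [3, 1, 7, 1]          # task 2 is needed by the most other tasks

    sortweight = deepcopy(runq_weight)
    sortweight.sort()
    copyweight = deepcopy(runq_weight)
    prio_map = []
    for weight in sortweight:
        idx = copyweight.index(weight)  # first task that still has this weight
        prio_map.append(idx)
        copyweight[idx] = -1            # mark as used so equal weights map to distinct tasks
    prio_map.reverse()

    print(prio_map)                     # [2, 0, 3, 1] - task 2 (weight 7) is scheduled first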
+class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
+    """
+    A scheduler optimised to complete .bb files as quickly as possible. The
+    priority map is sorted by task weight, but then reordered so that once a given
+    .bb file starts to build, it is completed as quickly as possible. This works
+    well where disk space is at a premium and classes like OE's rm_work are in
+    force.
+    """
+    def __init__(self, runqueue):
+        RunQueueSchedulerSpeed.__init__(self, runqueue)
+        from copy import deepcopy
+
+        #FIXME - whilst this groups all fnids together it does not reorder the
+        #fnid groups optimally.
+
+        basemap = deepcopy(self.prio_map)
+        self.prio_map = []
+        while (len(basemap) > 0):
+            entry = basemap.pop(0)
+            self.prio_map.append(entry)
+            fnid = self.rq.runq_fnid[entry]
+            todel = []
+            for entry in basemap:
+                entry_fnid = self.rq.runq_fnid[entry]
+                if entry_fnid == fnid:
+                    todel.append(basemap.index(entry))
+                    self.prio_map.append(entry)
+            todel.reverse()
+            for idx in todel:
+                del basemap[idx]
+
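Similarly, a small sketch of the grouping behaviour of RunQueueSchedulerCompletion, with hypothetical task-to-recipe assignments and a simplified rewrite of the todel bookkeeping above (again, not part of the patch):

    # Hypothetical data: prio_map from the speed scheduler and the recipe (fnid) of each task.
    runq_fnid = [0, 1, 0, 1, 0]          # task -> recipe id
    prio_map  = [4, 3, 2, 1, 0]          # weight-sorted order from RunQueueSchedulerSpeed

    basemap = list(prio_map)
    prio_map = []
    while len(basemap) > 0:
        entry = basemap.pop(0)
        prio_map.append(entry)
        fnid = runq_fnid[entry]
        # pull every remaining task of the same recipe forward
        for other in list(basemap):
            if runq_fnid[other] == fnid:
                basemap.remove(other)
                prio_map.append(other)

    print(prio_map)   # [4, 2, 0, 3, 1] - all of recipe 0's tasks run together, then recipe 1's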
 class RunQueue:
     """
     BitBake Run Queue implementation
@@ -71,14 +153,158 @@ class RunQueue:
         self.runq_task = []
         self.runq_depends = []
         self.runq_revdeps = []
-        self.runq_weight = []
-        self.prio_map = []

     def get_user_idstring(self, task):
         fn = self.taskData.fn_index[self.runq_fnid[task]]
         taskname = self.runq_task[task]
         return "%s, %s" % (fn, taskname)

+    def circular_depchains_handler(self, tasks):
+        """
+        Some tasks aren't buildable, likely due to circular dependency issues.
+        Identify the circular dependencies and print them in a user-readable format.
+        """
+        from copy import deepcopy
+
+        valid_chains = []
+        explored_deps = {}
+        msgs = []
+
+        def chain_reorder(chain):
+            """
+            Reorder a dependency chain so the lowest task id is first
+            """
+            lowest = 0
+            new_chain = []
+            for entry in range(len(chain)):
+                if chain[entry] < chain[lowest]:
+                    lowest = entry
+            new_chain.extend(chain[lowest:])
+            new_chain.extend(chain[:lowest])
+            return new_chain
+
+        def chain_compare_equal(chain1, chain2):
+            """
+            Compare two dependency chains and see if they're the same
+            """
+            if len(chain1) != len(chain2):
+                return False
+            for index in range(len(chain1)):
+                if chain1[index] != chain2[index]:
+                    return False
+            return True
+
+        def chain_array_contains(chain, chain_array):
+            """
+            Return True if chain_array contains chain
+            """
+            for ch in chain_array:
+                if chain_compare_equal(ch, chain):
+                    return True
+            return False
+
+        def find_chains(taskid, prev_chain):
+            prev_chain.append(taskid)
+            total_deps = []
+            total_deps.extend(self.runq_revdeps[taskid])
+            for revdep in self.runq_revdeps[taskid]:
+                if revdep in prev_chain:
+                    idx = prev_chain.index(revdep)
+                    # To prevent duplicates, reorder the chain to start with the lowest taskid
+                    # and search through an array of those we've already printed
+                    chain = prev_chain[idx:]
+                    new_chain = chain_reorder(chain)
+                    if not chain_array_contains(new_chain, valid_chains):
+                        valid_chains.append(new_chain)
+                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
+                        for dep in new_chain:
+                            msgs.append("  Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep]))
+                        msgs.append("\n")
+                    if len(valid_chains) > 10:
+                        msgs.append("Aborted dependency loops search after 10 matches.\n")
+                        return msgs
+                    continue
+                scan = False
+                if revdep not in explored_deps:
+                    scan = True
+                elif revdep in explored_deps[revdep]:
+                    scan = True
+                else:
+                    for dep in prev_chain:
+                        if dep in explored_deps[revdep]:
+                            scan = True
+                if scan:
+                    find_chains(revdep, deepcopy(prev_chain))
+                for dep in explored_deps[revdep]:
+                    if dep not in total_deps:
+                        total_deps.append(dep)
+
+            explored_deps[taskid] = total_deps
+
+        for task in tasks:
+            find_chains(task, [])
+
+        return msgs
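As a quick illustration (not part of the patch) of why chain_reorder() is enough to deduplicate loops: every rotation of the same dependency cycle is mapped to one canonical form before being compared against valid_chains.

    # The same circular chain discovered from two different starting tasks.
    def chain_reorder(chain):
        lowest = 0
        new_chain = []
        for entry in range(len(chain)):
            if chain[entry] < chain[lowest]:
                lowest = entry
        new_chain.extend(chain[lowest:])
        new_chain.extend(chain[:lowest])
        return new_chain

    print(chain_reorder([5, 2, 9]))   # [2, 9, 5]
    print(chain_reorder([9, 5, 2]))   # [2, 9, 5] - same loop, same canonical ordering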
+
+    def calculate_task_weights(self, endpoints):
+        """
+        Calculate a number representing the "weight" of each task. Heavier weighted tasks
+        have more dependencies and hence should be executed sooner for maximum speed.
+
+        This function also sanity checks the task list, finding tasks that it is not
+        possible to execute due to circular dependencies.
+        """
+
+        numTasks = len(self.runq_fnid)
+        weight = []
+        deps_left = []
+        task_done = []
+
+        for listid in range(numTasks):
+            task_done.append(False)
+            weight.append(0)
+            deps_left.append(len(self.runq_revdeps[listid]))
+
+        for listid in endpoints:
+            weight[listid] = 1
+            task_done[listid] = True
+
+        while 1:
+            next_points = []
+            for listid in endpoints:
+                for revdep in self.runq_depends[listid]:
+                    weight[revdep] = weight[revdep] + weight[listid]
+                    deps_left[revdep] = deps_left[revdep] - 1
+                    if deps_left[revdep] == 0:
+                        next_points.append(revdep)
+                        task_done[revdep] = True
+            endpoints = next_points
+            if len(next_points) == 0:
+                break
+
+        # Circular dependency sanity check
+        problem_tasks = []
+        for task in range(numTasks):
+            if task_done[task] is False or deps_left[task] != 0:
+                problem_tasks.append(task)
+                bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task)))
+                bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task]))
+
+        if problem_tasks:
+            message = "Unbuildable tasks were found.\n"
+            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
+            message = message + "Identifying dependency loops (this may take a short while)...\n"
+            bb.msg.error(bb.msg.domain.RunQueue, message)
+
+            msgs = self.circular_depchains_handler(problem_tasks)
+
+            message = "\n"
+            for msg in msgs:
+                message = message + msg
+            bb.msg.fatal(bb.msg.domain.RunQueue, message)
+
+        return weight
+
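A minimal sketch of the weight propagation performed by calculate_task_weights(), on a hypothetical four-task diamond (tasks 1 and 2 depend on task 0, task 3 depends on both); the data below is illustrative only, not from the patch.

    # Hypothetical dependency data in the same shape as runq_depends/runq_revdeps.
    runq_depends = [set(), set([0]), set([0]), set([1, 2])]   # task -> its dependencies
    runq_revdeps = [set([1, 2]), set([3]), set([3]), set()]   # task -> tasks depending on it

    weight = [0, 0, 0, 0]
    deps_left = [len(revs) for revs in runq_revdeps]          # [2, 1, 1, 0]
    endpoints = [t for t, revs in enumerate(runq_revdeps) if len(revs) == 0]   # [3]
    for t in endpoints:
        weight[t] = 1

    while endpoints:
        next_points = []
        for listid in endpoints:
            for dep in runq_depends[listid]:
                weight[dep] += weight[listid]
                deps_left[dep] -= 1
                if deps_left[dep] == 0:
                    next_points.append(dep)
        endpoints = next_points

    print(weight)   # [2, 1, 1, 1] - task 0 is needed by the most tasks, so it runs first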
     def prepare_runqueue(self):
         """
         Turn a set of taskData into a RunQueue and compute data needed
@@ -86,9 +312,7 @@ class RunQueue:
         """

         depends = []
-        runq_weight1 = []
         runq_build = []
-        runq_done = []

         taskData = self.taskData

@@ -98,6 +322,17 @@

         bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue")

+        # Step A - Work out a list of tasks to run
+        #
+        # Taskdata gives us a list of possible providers for every target
+        # ordered by priority (build_targets, run_targets). It also gives
+        # information on each of those providers.
+        #
+        # To create the actual list of tasks to execute we fix the list of
+        # providers and then resolve the dependencies into task IDs. This
+        # process is repeated for each type of dependency (tdepends, deptask,
+        # rdeptask, recrdeptask, idepends).
+
         for task in range(len(taskData.tasks_name)):
             fnid = taskData.tasks_fnid[task]
             fn = taskData.fn_index[fnid]
@@ -105,9 +340,15 @@

             if fnid not in taskData.failed_fnids:

+                # Resolve task internal dependencies
+                #
+                # e.g. addtask before X after Y
                 depends = taskData.tasks_tdepends[task]

-                # Resolve Depends
+                # Resolve 'deptask' dependencies
+                #
+                # e.g. do_sometask[deptask] = "do_someothertask"
+                # (makes sure sometask runs after someothertask of all DEPENDS)
                 if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                     tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                     for depid in taskData.depids[fnid]:
@@ -119,7 +360,10 @@
                                 for taskname in tasknames:
                                     depends.append(taskData.gettask_id(dep, taskname))

-                # Resolve Runtime Depends
+                # Resolve 'rdeptask' dependencies
+                #
+                # e.g. do_sometask[rdeptask] = "do_someothertask"
+                # (makes sure sometask runs after someothertask of all RDEPENDS)
                 if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                     taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
                     for depid in taskData.rdepids[fnid]:
@@ -129,6 +373,10 @@
                                 dep = taskData.fn_index[depdata]
                                 depends.append(taskData.gettask_id(dep, taskname))

+                # Resolve inter-task dependencies
+                #
+                # e.g. do_sometask[depends] = "targetname:do_someothertask"
+                # (makes sure sometask runs after targetname's someothertask)
                 idepends = taskData.tasks_idepends[task]
                 for idepend in idepends:
                     depid = int(idepend.split(":")[0])
@@ -207,9 +455,10 @@
                                 if nextdepid not in dep_seen:
                                     add_recursive_build(nextdepid, fnid)

-
-                # Resolve Recursive Runtime Depends
-                # Also includes all thier build depends, intertask depends and runtime depends
+                # Resolve recursive 'recrdeptask' dependencies
+                #
+                # e.g. do_sometask[recrdeptask] = "do_someothertask"
+                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                 if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                     for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split():
                         dep_seen = []
@@ -223,7 +472,7 @@
                             depid = int(idepend.split(":")[0])
                             add_recursive_build(depid, fnid)

-                #Prune self references
+                # Remove all self references
                 if task in depends:
                     newdep = []
                     bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
@@ -237,11 +486,14 @@
             self.runq_task.append(taskData.tasks_name[task])
             self.runq_depends.append(Set(depends))
             self.runq_revdeps.append(Set())
-            self.runq_weight.append(0)

-            runq_weight1.append(0)
             runq_build.append(0)
-            runq_done.append(0)
+
+        # Step B - Mark all active tasks
+        #
+        # Start with the tasks we were asked to run and mark all dependencies
+        # as active too. If the task is to be 'forced', clear its stamp. Once
+        # all active tasks are marked, prune the ones we don't need.

         bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")

@@ -280,11 +532,17 @@
             if fnid in taskData.failed_fnids:
                 continue

+            if target[1] not in taskData.tasks_lookup[fnid]:
+                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0]))
+
             listid = taskData.tasks_lookup[fnid][target[1]]

             mark_active(listid, 1)

-        # Prune inactive tasks
+        # Step C - Prune all inactive tasks
+        #
+        # Once all active tasks are marked, prune the ones we don't need.
+
         maps = []
         delcount = 0
         for listid in range(len(self.runq_fnid)):
@@ -294,14 +552,16 @@
                 del self.runq_fnid[listid-delcount]
                 del self.runq_task[listid-delcount]
                 del self.runq_depends[listid-delcount]
-                del self.runq_weight[listid-delcount]
-                del runq_weight1[listid-delcount]
                 del runq_build[listid-delcount]
-                del runq_done[listid-delcount]
                 del self.runq_revdeps[listid-delcount]
                 delcount = delcount + 1
                 maps.append(-1)

+        #
+        # Step D - Sanity checks and computation
+        #
+
+        # Check to make sure we still have tasks to run
         if len(self.runq_fnid) == 0:
             if not taskData.abort:
                 bb.msg.note(1, bb.msg.domain.RunQueue, "All possible tasks have been run but build incomplete (--continue mode). See errors above for incomplete tasks.")
@@ -310,6 +570,8 @@

         bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))

+        # Remap the dependencies to account for the deleted tasks
+        # Check we didn't delete a task we depend on
         for listid in range(len(self.runq_fnid)):
             newdeps = []
             origdeps = self.runq_depends[listid]
@@ -321,62 +583,37 @@

         bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")

+        # Generate a list of reverse dependencies to ease future calculations
         for listid in range(len(self.runq_fnid)):
             for dep in self.runq_depends[listid]:
                 self.runq_revdeps[dep].add(listid)

+        # Identify tasks at the end of dependency chains
+        # Error on circular dependency loops (length two)
         endpoints = []
         for listid in range(len(self.runq_fnid)):
             revdeps = self.runq_revdeps[listid]
             if len(revdeps) == 0:
-                runq_done[listid] = 1
-                self.runq_weight[listid] = 1
                 endpoints.append(listid)
             for dep in revdeps:
                 if dep in self.runq_depends[listid]:
                     #self.dump_data(taskData)
                     bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))
-            runq_weight1[listid] = len(revdeps)

         bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))

-        while 1:
-            next_points = []
-            for listid in endpoints:
-                for revdep in self.runq_depends[listid]:
-                    self.runq_weight[revdep] = self.runq_weight[revdep] + self.runq_weight[listid]
-                    runq_weight1[revdep] = runq_weight1[revdep] - 1
-                    if runq_weight1[revdep] == 0:
-                        next_points.append(revdep)
-                        runq_done[revdep] = 1
-            endpoints = next_points
-            if len(next_points) == 0:
-                break
-
-        # Sanity Checks
-        for task in range(len(self.runq_fnid)):
-            if runq_done[task] == 0:
-                seen = []
-                deps_seen = []
-                def print_chain(taskid, finish):
-                    seen.append(taskid)
-                    for revdep in self.runq_revdeps[taskid]:
-                        if runq_done[revdep] == 0 and revdep not in seen and not finish:
-                            bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep), self.runq_depends[revdep]))
-                            if revdep in deps_seen:
-                                bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep)))
-                                finish = True
-                                return
-                            for dep in self.runq_depends[revdep]:
-                                deps_seen.append(dep)
-                            print_chain(revdep, finish)
-                print_chain(task, False)
-                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task)))
-            if runq_weight1[task] != 0:
-                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task)))
-
-
-        # Check for multiple tasks building the same provider
+        # Calculate task weights
+        # Check for higher-length circular dependencies
+        self.runq_weight = self.calculate_task_weights(endpoints)
+
+        # Decide what order to execute the tasks in, pick a scheduler
+        # FIXME - Allow user selection
+        #self.sched = RunQueueScheduler(self)
+        self.sched = RunQueueSchedulerSpeed(self)
+        #self.sched = RunQueueSchedulerCompletion(self)
+
+        # Sanity Check - Check for multiple tasks building the same provider
         prov_list = {}
         seen_fn = []
         for task in range(len(self.runq_fnid)):
@@ -397,21 +634,6 @@
         #if error:
         #    bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...")

-
-        # Make a weight sorted map
-        from copy import deepcopy
-
-        sortweight = deepcopy(self.runq_weight)
-        sortweight.sort()
-        copyweight = deepcopy(self.runq_weight)
-        self.prio_map = []
-
-        for weight in sortweight:
-            idx = copyweight.index(weight)
-            self.prio_map.append(idx)
-            copyweight[idx] = -1
-        self.prio_map.reverse()
-
         #self.dump_data(taskData)

     def execute_runqueue(self):
@@ -483,18 +705,6 @@
                 taskname = self.runq_task[revdep]
                 bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))

-    def get_next_task(self):
-        """
-        Return the id of the highest priority task that is buildable
-        """
-        for task1 in range(len(self.runq_fnid)):
-            task = self.prio_map[task1]
-            if self.runq_running[task] == 1:
-                continue
-            if self.runq_buildable[task] == 1:
-                return task
-        return None
-
     def execute_runqueue_internal(self):
         """
         Run the tasks in a queue prepared by prepare_runqueue
@@ -511,20 +721,21 @@
         def sigint_handler(signum, frame):
             raise KeyboardInterrupt

+        # RP - this code allows tasks to run out of the correct order - disabled, FIXME
         # Find any tasks with current stamps and remove them from the queue
-        for task1 in range(len(self.runq_fnid)):
-            task = self.prio_map[task1]
-            fn = self.taskData.fn_index[self.runq_fnid[task]]
-            taskname = self.runq_task[task]
-            if bb.build.stamp_is_current(taskname, self.dataCache, fn):
-                bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
-                self.runq_running[task] = 1
-                self.task_complete(task)
-                self.stats.taskCompleted()
-                self.stats.taskSkipped()
+        #for task1 in range(len(self.runq_fnid)):
+        #    task = self.prio_map[task1]
+        #    fn = self.taskData.fn_index[self.runq_fnid[task]]
+        #    taskname = self.runq_task[task]
+        #    if bb.build.stamp_is_current(taskname, self.dataCache, fn):
+        #        bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
+        #        self.runq_running[task] = 1
+        #        self.task_complete(task)
+        #        self.stats.taskCompleted()
+        #        self.stats.taskSkipped()

         while True:
-            task = self.get_next_task()
+            task = self.sched.next()
             if task is not None:
                 fn = self.taskData.fn_index[self.runq_fnid[task]]