1*4882a593Smuzhiyun""" 2*4882a593SmuzhiyunBitBake 'RunQueue' implementation 3*4882a593Smuzhiyun 4*4882a593SmuzhiyunHandles preparation and execution of a queue of tasks 5*4882a593Smuzhiyun""" 6*4882a593Smuzhiyun 7*4882a593Smuzhiyun# Copyright (C) 2006-2007 Richard Purdie 8*4882a593Smuzhiyun# 9*4882a593Smuzhiyun# SPDX-License-Identifier: GPL-2.0-only 10*4882a593Smuzhiyun# 11*4882a593Smuzhiyun 12*4882a593Smuzhiyunimport copy 13*4882a593Smuzhiyunimport os 14*4882a593Smuzhiyunimport sys 15*4882a593Smuzhiyunimport stat 16*4882a593Smuzhiyunimport errno 17*4882a593Smuzhiyunimport logging 18*4882a593Smuzhiyunimport re 19*4882a593Smuzhiyunimport bb 20*4882a593Smuzhiyunfrom bb import msg, event 21*4882a593Smuzhiyunfrom bb import monitordisk 22*4882a593Smuzhiyunimport subprocess 23*4882a593Smuzhiyunimport pickle 24*4882a593Smuzhiyunfrom multiprocessing import Process 25*4882a593Smuzhiyunimport shlex 26*4882a593Smuzhiyunimport pprint 27*4882a593Smuzhiyunimport time 28*4882a593Smuzhiyun 29*4882a593Smuzhiyunbblogger = logging.getLogger("BitBake") 30*4882a593Smuzhiyunlogger = logging.getLogger("BitBake.RunQueue") 31*4882a593Smuzhiyunhashequiv_logger = logging.getLogger("BitBake.RunQueue.HashEquiv") 32*4882a593Smuzhiyun 33*4882a593Smuzhiyun__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' ) 34*4882a593Smuzhiyun 35*4882a593Smuzhiyundef fn_from_tid(tid): 36*4882a593Smuzhiyun return tid.rsplit(":", 1)[0] 37*4882a593Smuzhiyun 38*4882a593Smuzhiyundef taskname_from_tid(tid): 39*4882a593Smuzhiyun return tid.rsplit(":", 1)[1] 40*4882a593Smuzhiyun 41*4882a593Smuzhiyundef mc_from_tid(tid): 42*4882a593Smuzhiyun if tid.startswith('mc:') and tid.count(':') >= 2: 43*4882a593Smuzhiyun return tid.split(':')[1] 44*4882a593Smuzhiyun return "" 45*4882a593Smuzhiyun 46*4882a593Smuzhiyundef split_tid(tid): 47*4882a593Smuzhiyun (mc, fn, taskname, _) = split_tid_mcfn(tid) 48*4882a593Smuzhiyun return (mc, fn, taskname) 49*4882a593Smuzhiyun 50*4882a593Smuzhiyundef split_mc(n): 51*4882a593Smuzhiyun if n.startswith("mc:") and n.count(':') >= 2: 52*4882a593Smuzhiyun _, mc, n = n.split(":", 2) 53*4882a593Smuzhiyun return (mc, n) 54*4882a593Smuzhiyun return ('', n) 55*4882a593Smuzhiyun 56*4882a593Smuzhiyundef split_tid_mcfn(tid): 57*4882a593Smuzhiyun if tid.startswith('mc:') and tid.count(':') >= 2: 58*4882a593Smuzhiyun elems = tid.split(':') 59*4882a593Smuzhiyun mc = elems[1] 60*4882a593Smuzhiyun fn = ":".join(elems[2:-1]) 61*4882a593Smuzhiyun taskname = elems[-1] 62*4882a593Smuzhiyun mcfn = "mc:" + mc + ":" + fn 63*4882a593Smuzhiyun else: 64*4882a593Smuzhiyun tid = tid.rsplit(":", 1) 65*4882a593Smuzhiyun mc = "" 66*4882a593Smuzhiyun fn = tid[0] 67*4882a593Smuzhiyun taskname = tid[1] 68*4882a593Smuzhiyun mcfn = fn 69*4882a593Smuzhiyun 70*4882a593Smuzhiyun return (mc, fn, taskname, mcfn) 71*4882a593Smuzhiyun 72*4882a593Smuzhiyundef build_tid(mc, fn, taskname): 73*4882a593Smuzhiyun if mc: 74*4882a593Smuzhiyun return "mc:" + mc + ":" + fn + ":" + taskname 75*4882a593Smuzhiyun return fn + ":" + taskname 76*4882a593Smuzhiyun 77*4882a593Smuzhiyun# Index used to pair up potentially matching multiconfig tasks 78*4882a593Smuzhiyun# We match on PN, taskname and hash being equal 79*4882a593Smuzhiyundef pending_hash_index(tid, rqdata): 80*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 81*4882a593Smuzhiyun pn = rqdata.dataCaches[mc].pkg_fn[taskfn] 82*4882a593Smuzhiyun h = rqdata.runtaskentries[tid].unihash 83*4882a593Smuzhiyun return pn + ":" + "taskname" + h 84*4882a593Smuzhiyun 85*4882a593Smuzhiyunclass RunQueueStats: 86*4882a593Smuzhiyun """ 87*4882a593Smuzhiyun Holds statistics on the tasks handled by the associated runQueue 88*4882a593Smuzhiyun """ 89*4882a593Smuzhiyun def __init__(self, total, setscene_total): 90*4882a593Smuzhiyun self.completed = 0 91*4882a593Smuzhiyun self.skipped = 0 92*4882a593Smuzhiyun self.failed = 0 93*4882a593Smuzhiyun self.active = 0 94*4882a593Smuzhiyun self.setscene_active = 0 95*4882a593Smuzhiyun self.setscene_covered = 0 96*4882a593Smuzhiyun self.setscene_notcovered = 0 97*4882a593Smuzhiyun self.setscene_total = setscene_total 98*4882a593Smuzhiyun self.total = total 99*4882a593Smuzhiyun 100*4882a593Smuzhiyun def copy(self): 101*4882a593Smuzhiyun obj = self.__class__(self.total, self.setscene_total) 102*4882a593Smuzhiyun obj.__dict__.update(self.__dict__) 103*4882a593Smuzhiyun return obj 104*4882a593Smuzhiyun 105*4882a593Smuzhiyun def taskFailed(self): 106*4882a593Smuzhiyun self.active = self.active - 1 107*4882a593Smuzhiyun self.failed = self.failed + 1 108*4882a593Smuzhiyun 109*4882a593Smuzhiyun def taskCompleted(self): 110*4882a593Smuzhiyun self.active = self.active - 1 111*4882a593Smuzhiyun self.completed = self.completed + 1 112*4882a593Smuzhiyun 113*4882a593Smuzhiyun def taskSkipped(self): 114*4882a593Smuzhiyun self.active = self.active + 1 115*4882a593Smuzhiyun self.skipped = self.skipped + 1 116*4882a593Smuzhiyun 117*4882a593Smuzhiyun def taskActive(self): 118*4882a593Smuzhiyun self.active = self.active + 1 119*4882a593Smuzhiyun 120*4882a593Smuzhiyun def updateCovered(self, covered, notcovered): 121*4882a593Smuzhiyun self.setscene_covered = covered 122*4882a593Smuzhiyun self.setscene_notcovered = notcovered 123*4882a593Smuzhiyun 124*4882a593Smuzhiyun def updateActiveSetscene(self, active): 125*4882a593Smuzhiyun self.setscene_active = active 126*4882a593Smuzhiyun 127*4882a593Smuzhiyun# These values indicate the next step due to be run in the 128*4882a593Smuzhiyun# runQueue state machine 129*4882a593SmuzhiyunrunQueuePrepare = 2 130*4882a593SmuzhiyunrunQueueSceneInit = 3 131*4882a593SmuzhiyunrunQueueRunning = 6 132*4882a593SmuzhiyunrunQueueFailed = 7 133*4882a593SmuzhiyunrunQueueCleanUp = 8 134*4882a593SmuzhiyunrunQueueComplete = 9 135*4882a593Smuzhiyun 136*4882a593Smuzhiyunclass RunQueueScheduler(object): 137*4882a593Smuzhiyun """ 138*4882a593Smuzhiyun Control the order tasks are scheduled in. 139*4882a593Smuzhiyun """ 140*4882a593Smuzhiyun name = "basic" 141*4882a593Smuzhiyun 142*4882a593Smuzhiyun def __init__(self, runqueue, rqdata): 143*4882a593Smuzhiyun """ 144*4882a593Smuzhiyun The default scheduler just returns the first buildable task (the 145*4882a593Smuzhiyun priority map is sorted by task number) 146*4882a593Smuzhiyun """ 147*4882a593Smuzhiyun self.rq = runqueue 148*4882a593Smuzhiyun self.rqdata = rqdata 149*4882a593Smuzhiyun self.numTasks = len(self.rqdata.runtaskentries) 150*4882a593Smuzhiyun 151*4882a593Smuzhiyun self.prio_map = [self.rqdata.runtaskentries.keys()] 152*4882a593Smuzhiyun 153*4882a593Smuzhiyun self.buildable = set() 154*4882a593Smuzhiyun self.skip_maxthread = {} 155*4882a593Smuzhiyun self.stamps = {} 156*4882a593Smuzhiyun for tid in self.rqdata.runtaskentries: 157*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 158*4882a593Smuzhiyun self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) 159*4882a593Smuzhiyun if tid in self.rq.runq_buildable: 160*4882a593Smuzhiyun self.buildable.append(tid) 161*4882a593Smuzhiyun 162*4882a593Smuzhiyun self.rev_prio_map = None 163*4882a593Smuzhiyun self.is_pressure_usable() 164*4882a593Smuzhiyun 165*4882a593Smuzhiyun def is_pressure_usable(self): 166*4882a593Smuzhiyun """ 167*4882a593Smuzhiyun If monitoring pressure, return True if pressure files can be open and read. For example 168*4882a593Smuzhiyun openSUSE /proc/pressure/* files have readable file permissions but when read the error EOPNOTSUPP (Operation not supported) 169*4882a593Smuzhiyun is returned. 170*4882a593Smuzhiyun """ 171*4882a593Smuzhiyun if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure: 172*4882a593Smuzhiyun try: 173*4882a593Smuzhiyun with open("/proc/pressure/cpu") as cpu_pressure_fds, \ 174*4882a593Smuzhiyun open("/proc/pressure/io") as io_pressure_fds, \ 175*4882a593Smuzhiyun open("/proc/pressure/memory") as memory_pressure_fds: 176*4882a593Smuzhiyun 177*4882a593Smuzhiyun self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] 178*4882a593Smuzhiyun self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] 179*4882a593Smuzhiyun self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] 180*4882a593Smuzhiyun self.prev_pressure_time = time.time() 181*4882a593Smuzhiyun self.check_pressure = True 182*4882a593Smuzhiyun except: 183*4882a593Smuzhiyun bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure") 184*4882a593Smuzhiyun self.check_pressure = False 185*4882a593Smuzhiyun else: 186*4882a593Smuzhiyun self.check_pressure = False 187*4882a593Smuzhiyun 188*4882a593Smuzhiyun def exceeds_max_pressure(self): 189*4882a593Smuzhiyun """ 190*4882a593Smuzhiyun Monitor the difference in total pressure at least once per second, if 191*4882a593Smuzhiyun BB_PRESSURE_MAX_{CPU|IO|MEMORY} are set, return True if above threshold. 192*4882a593Smuzhiyun """ 193*4882a593Smuzhiyun if self.check_pressure: 194*4882a593Smuzhiyun with open("/proc/pressure/cpu") as cpu_pressure_fds, \ 195*4882a593Smuzhiyun open("/proc/pressure/io") as io_pressure_fds, \ 196*4882a593Smuzhiyun open("/proc/pressure/memory") as memory_pressure_fds: 197*4882a593Smuzhiyun # extract "total" from /proc/pressure/{cpu|io} 198*4882a593Smuzhiyun curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] 199*4882a593Smuzhiyun curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] 200*4882a593Smuzhiyun curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] 201*4882a593Smuzhiyun exceeds_cpu_pressure = self.rq.max_cpu_pressure and (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) > self.rq.max_cpu_pressure 202*4882a593Smuzhiyun exceeds_io_pressure = self.rq.max_io_pressure and (float(curr_io_pressure) - float(self.prev_io_pressure)) > self.rq.max_io_pressure 203*4882a593Smuzhiyun exceeds_memory_pressure = self.rq.max_memory_pressure and (float(curr_memory_pressure) - float(self.prev_memory_pressure)) > self.rq.max_memory_pressure 204*4882a593Smuzhiyun now = time.time() 205*4882a593Smuzhiyun if now - self.prev_pressure_time > 1.0: 206*4882a593Smuzhiyun self.prev_cpu_pressure = curr_cpu_pressure 207*4882a593Smuzhiyun self.prev_io_pressure = curr_io_pressure 208*4882a593Smuzhiyun self.prev_memory_pressure = curr_memory_pressure 209*4882a593Smuzhiyun self.prev_pressure_time = now 210*4882a593Smuzhiyun return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure) 211*4882a593Smuzhiyun return False 212*4882a593Smuzhiyun 213*4882a593Smuzhiyun def next_buildable_task(self): 214*4882a593Smuzhiyun """ 215*4882a593Smuzhiyun Return the id of the first task we find that is buildable 216*4882a593Smuzhiyun """ 217*4882a593Smuzhiyun # Once tasks are running we don't need to worry about them again 218*4882a593Smuzhiyun self.buildable.difference_update(self.rq.runq_running) 219*4882a593Smuzhiyun buildable = set(self.buildable) 220*4882a593Smuzhiyun buildable.difference_update(self.rq.holdoff_tasks) 221*4882a593Smuzhiyun buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered) 222*4882a593Smuzhiyun if not buildable: 223*4882a593Smuzhiyun return None 224*4882a593Smuzhiyun 225*4882a593Smuzhiyun # Bitbake requires that at least one task be active. Only check for pressure if 226*4882a593Smuzhiyun # this is the case, otherwise the pressure limitation could result in no tasks 227*4882a593Smuzhiyun # being active and no new tasks started thereby, at times, breaking the scheduler. 228*4882a593Smuzhiyun if self.rq.stats.active and self.exceeds_max_pressure(): 229*4882a593Smuzhiyun return None 230*4882a593Smuzhiyun 231*4882a593Smuzhiyun # Filter out tasks that have a max number of threads that have been exceeded 232*4882a593Smuzhiyun skip_buildable = {} 233*4882a593Smuzhiyun for running in self.rq.runq_running.difference(self.rq.runq_complete): 234*4882a593Smuzhiyun rtaskname = taskname_from_tid(running) 235*4882a593Smuzhiyun if rtaskname not in self.skip_maxthread: 236*4882a593Smuzhiyun self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads") 237*4882a593Smuzhiyun if not self.skip_maxthread[rtaskname]: 238*4882a593Smuzhiyun continue 239*4882a593Smuzhiyun if rtaskname in skip_buildable: 240*4882a593Smuzhiyun skip_buildable[rtaskname] += 1 241*4882a593Smuzhiyun else: 242*4882a593Smuzhiyun skip_buildable[rtaskname] = 1 243*4882a593Smuzhiyun 244*4882a593Smuzhiyun if len(buildable) == 1: 245*4882a593Smuzhiyun tid = buildable.pop() 246*4882a593Smuzhiyun taskname = taskname_from_tid(tid) 247*4882a593Smuzhiyun if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): 248*4882a593Smuzhiyun return None 249*4882a593Smuzhiyun stamp = self.stamps[tid] 250*4882a593Smuzhiyun if stamp not in self.rq.build_stamps.values(): 251*4882a593Smuzhiyun return tid 252*4882a593Smuzhiyun 253*4882a593Smuzhiyun if not self.rev_prio_map: 254*4882a593Smuzhiyun self.rev_prio_map = {} 255*4882a593Smuzhiyun for tid in self.rqdata.runtaskentries: 256*4882a593Smuzhiyun self.rev_prio_map[tid] = self.prio_map.index(tid) 257*4882a593Smuzhiyun 258*4882a593Smuzhiyun best = None 259*4882a593Smuzhiyun bestprio = None 260*4882a593Smuzhiyun for tid in buildable: 261*4882a593Smuzhiyun taskname = taskname_from_tid(tid) 262*4882a593Smuzhiyun if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): 263*4882a593Smuzhiyun continue 264*4882a593Smuzhiyun prio = self.rev_prio_map[tid] 265*4882a593Smuzhiyun if bestprio is None or bestprio > prio: 266*4882a593Smuzhiyun stamp = self.stamps[tid] 267*4882a593Smuzhiyun if stamp in self.rq.build_stamps.values(): 268*4882a593Smuzhiyun continue 269*4882a593Smuzhiyun bestprio = prio 270*4882a593Smuzhiyun best = tid 271*4882a593Smuzhiyun 272*4882a593Smuzhiyun return best 273*4882a593Smuzhiyun 274*4882a593Smuzhiyun def next(self): 275*4882a593Smuzhiyun """ 276*4882a593Smuzhiyun Return the id of the task we should build next 277*4882a593Smuzhiyun """ 278*4882a593Smuzhiyun if self.rq.can_start_task(): 279*4882a593Smuzhiyun return self.next_buildable_task() 280*4882a593Smuzhiyun 281*4882a593Smuzhiyun def newbuildable(self, task): 282*4882a593Smuzhiyun self.buildable.add(task) 283*4882a593Smuzhiyun 284*4882a593Smuzhiyun def removebuildable(self, task): 285*4882a593Smuzhiyun self.buildable.remove(task) 286*4882a593Smuzhiyun 287*4882a593Smuzhiyun def describe_task(self, taskid): 288*4882a593Smuzhiyun result = 'ID %s' % taskid 289*4882a593Smuzhiyun if self.rev_prio_map: 290*4882a593Smuzhiyun result = result + (' pri %d' % self.rev_prio_map[taskid]) 291*4882a593Smuzhiyun return result 292*4882a593Smuzhiyun 293*4882a593Smuzhiyun def dump_prio(self, comment): 294*4882a593Smuzhiyun bb.debug(3, '%s (most important first):\n%s' % 295*4882a593Smuzhiyun (comment, 296*4882a593Smuzhiyun '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for 297*4882a593Smuzhiyun index, taskid in enumerate(self.prio_map)]))) 298*4882a593Smuzhiyun 299*4882a593Smuzhiyunclass RunQueueSchedulerSpeed(RunQueueScheduler): 300*4882a593Smuzhiyun """ 301*4882a593Smuzhiyun A scheduler optimised for speed. The priority map is sorted by task weight, 302*4882a593Smuzhiyun heavier weighted tasks (tasks needed by the most other tasks) are run first. 303*4882a593Smuzhiyun """ 304*4882a593Smuzhiyun name = "speed" 305*4882a593Smuzhiyun 306*4882a593Smuzhiyun def __init__(self, runqueue, rqdata): 307*4882a593Smuzhiyun """ 308*4882a593Smuzhiyun The priority map is sorted by task weight. 309*4882a593Smuzhiyun """ 310*4882a593Smuzhiyun RunQueueScheduler.__init__(self, runqueue, rqdata) 311*4882a593Smuzhiyun 312*4882a593Smuzhiyun weights = {} 313*4882a593Smuzhiyun for tid in self.rqdata.runtaskentries: 314*4882a593Smuzhiyun weight = self.rqdata.runtaskentries[tid].weight 315*4882a593Smuzhiyun if not weight in weights: 316*4882a593Smuzhiyun weights[weight] = [] 317*4882a593Smuzhiyun weights[weight].append(tid) 318*4882a593Smuzhiyun 319*4882a593Smuzhiyun self.prio_map = [] 320*4882a593Smuzhiyun for weight in sorted(weights): 321*4882a593Smuzhiyun for w in weights[weight]: 322*4882a593Smuzhiyun self.prio_map.append(w) 323*4882a593Smuzhiyun 324*4882a593Smuzhiyun self.prio_map.reverse() 325*4882a593Smuzhiyun 326*4882a593Smuzhiyunclass RunQueueSchedulerCompletion(RunQueueSchedulerSpeed): 327*4882a593Smuzhiyun """ 328*4882a593Smuzhiyun A scheduler optimised to complete .bb files as quickly as possible. The 329*4882a593Smuzhiyun priority map is sorted by task weight, but then reordered so once a given 330*4882a593Smuzhiyun .bb file starts to build, it's completed as quickly as possible by 331*4882a593Smuzhiyun running all tasks related to the same .bb file one after the after. 332*4882a593Smuzhiyun This works well where disk space is at a premium and classes like OE's 333*4882a593Smuzhiyun rm_work are in force. 334*4882a593Smuzhiyun """ 335*4882a593Smuzhiyun name = "completion" 336*4882a593Smuzhiyun 337*4882a593Smuzhiyun def __init__(self, runqueue, rqdata): 338*4882a593Smuzhiyun super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata) 339*4882a593Smuzhiyun 340*4882a593Smuzhiyun # Extract list of tasks for each recipe, with tasks sorted 341*4882a593Smuzhiyun # ascending from "must run first" (typically do_fetch) to 342*4882a593Smuzhiyun # "runs last" (do_build). The speed scheduler prioritizes 343*4882a593Smuzhiyun # tasks that must run first before the ones that run later; 344*4882a593Smuzhiyun # this is what we depend on here. 345*4882a593Smuzhiyun task_lists = {} 346*4882a593Smuzhiyun for taskid in self.prio_map: 347*4882a593Smuzhiyun fn, taskname = taskid.rsplit(':', 1) 348*4882a593Smuzhiyun task_lists.setdefault(fn, []).append(taskname) 349*4882a593Smuzhiyun 350*4882a593Smuzhiyun # Now unify the different task lists. The strategy is that 351*4882a593Smuzhiyun # common tasks get skipped and new ones get inserted after the 352*4882a593Smuzhiyun # preceeding common one(s) as they are found. Because task 353*4882a593Smuzhiyun # lists should differ only by their number of tasks, but not 354*4882a593Smuzhiyun # the ordering of the common tasks, this should result in a 355*4882a593Smuzhiyun # deterministic result that is a superset of the individual 356*4882a593Smuzhiyun # task ordering. 357*4882a593Smuzhiyun all_tasks = [] 358*4882a593Smuzhiyun for recipe, new_tasks in task_lists.items(): 359*4882a593Smuzhiyun index = 0 360*4882a593Smuzhiyun old_task = all_tasks[index] if index < len(all_tasks) else None 361*4882a593Smuzhiyun for new_task in new_tasks: 362*4882a593Smuzhiyun if old_task == new_task: 363*4882a593Smuzhiyun # Common task, skip it. This is the fast-path which 364*4882a593Smuzhiyun # avoids a full search. 365*4882a593Smuzhiyun index += 1 366*4882a593Smuzhiyun old_task = all_tasks[index] if index < len(all_tasks) else None 367*4882a593Smuzhiyun else: 368*4882a593Smuzhiyun try: 369*4882a593Smuzhiyun index = all_tasks.index(new_task) 370*4882a593Smuzhiyun # Already present, just not at the current 371*4882a593Smuzhiyun # place. We re-synchronized by changing the 372*4882a593Smuzhiyun # index so that it matches again. Now 373*4882a593Smuzhiyun # move on to the next existing task. 374*4882a593Smuzhiyun index += 1 375*4882a593Smuzhiyun old_task = all_tasks[index] if index < len(all_tasks) else None 376*4882a593Smuzhiyun except ValueError: 377*4882a593Smuzhiyun # Not present. Insert before old_task, which 378*4882a593Smuzhiyun # remains the same (but gets shifted back). 379*4882a593Smuzhiyun all_tasks.insert(index, new_task) 380*4882a593Smuzhiyun index += 1 381*4882a593Smuzhiyun bb.debug(3, 'merged task list: %s' % all_tasks) 382*4882a593Smuzhiyun 383*4882a593Smuzhiyun # Now reverse the order so that tasks that finish the work on one 384*4882a593Smuzhiyun # recipe are considered more imporant (= come first). The ordering 385*4882a593Smuzhiyun # is now so that do_build is most important. 386*4882a593Smuzhiyun all_tasks.reverse() 387*4882a593Smuzhiyun 388*4882a593Smuzhiyun # Group tasks of the same kind before tasks of less important 389*4882a593Smuzhiyun # kinds at the head of the queue (because earlier = lower 390*4882a593Smuzhiyun # priority number = runs earlier), while preserving the 391*4882a593Smuzhiyun # ordering by recipe. If recipe foo is more important than 392*4882a593Smuzhiyun # bar, then the goal is to work on foo's do_populate_sysroot 393*4882a593Smuzhiyun # before bar's do_populate_sysroot and on the more important 394*4882a593Smuzhiyun # tasks of foo before any of the less important tasks in any 395*4882a593Smuzhiyun # other recipe (if those other recipes are more important than 396*4882a593Smuzhiyun # foo). 397*4882a593Smuzhiyun # 398*4882a593Smuzhiyun # All of this only applies when tasks are runable. Explicit 399*4882a593Smuzhiyun # dependencies still override this ordering by priority. 400*4882a593Smuzhiyun # 401*4882a593Smuzhiyun # Here's an example why this priority re-ordering helps with 402*4882a593Smuzhiyun # minimizing disk usage. Consider a recipe foo with a higher 403*4882a593Smuzhiyun # priority than bar where foo DEPENDS on bar. Then the 404*4882a593Smuzhiyun # implicit rule (from base.bbclass) is that foo's do_configure 405*4882a593Smuzhiyun # depends on bar's do_populate_sysroot. This ensures that 406*4882a593Smuzhiyun # bar's do_populate_sysroot gets done first. Normally the 407*4882a593Smuzhiyun # tasks from foo would continue to run once that is done, and 408*4882a593Smuzhiyun # bar only gets completed and cleaned up later. By ordering 409*4882a593Smuzhiyun # bar's task that depend on bar's do_populate_sysroot before foo's 410*4882a593Smuzhiyun # do_configure, that problem gets avoided. 411*4882a593Smuzhiyun task_index = 0 412*4882a593Smuzhiyun self.dump_prio('original priorities') 413*4882a593Smuzhiyun for task in all_tasks: 414*4882a593Smuzhiyun for index in range(task_index, self.numTasks): 415*4882a593Smuzhiyun taskid = self.prio_map[index] 416*4882a593Smuzhiyun taskname = taskid.rsplit(':', 1)[1] 417*4882a593Smuzhiyun if taskname == task: 418*4882a593Smuzhiyun del self.prio_map[index] 419*4882a593Smuzhiyun self.prio_map.insert(task_index, taskid) 420*4882a593Smuzhiyun task_index += 1 421*4882a593Smuzhiyun self.dump_prio('completion priorities') 422*4882a593Smuzhiyun 423*4882a593Smuzhiyunclass RunTaskEntry(object): 424*4882a593Smuzhiyun def __init__(self): 425*4882a593Smuzhiyun self.depends = set() 426*4882a593Smuzhiyun self.revdeps = set() 427*4882a593Smuzhiyun self.hash = None 428*4882a593Smuzhiyun self.unihash = None 429*4882a593Smuzhiyun self.task = None 430*4882a593Smuzhiyun self.weight = 1 431*4882a593Smuzhiyun 432*4882a593Smuzhiyunclass RunQueueData: 433*4882a593Smuzhiyun """ 434*4882a593Smuzhiyun BitBake Run Queue implementation 435*4882a593Smuzhiyun """ 436*4882a593Smuzhiyun def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets): 437*4882a593Smuzhiyun self.cooker = cooker 438*4882a593Smuzhiyun self.dataCaches = dataCaches 439*4882a593Smuzhiyun self.taskData = taskData 440*4882a593Smuzhiyun self.targets = targets 441*4882a593Smuzhiyun self.rq = rq 442*4882a593Smuzhiyun self.warn_multi_bb = False 443*4882a593Smuzhiyun 444*4882a593Smuzhiyun self.multi_provider_allowed = (cfgData.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split() 445*4882a593Smuzhiyun self.setscene_ignore_tasks = get_setscene_enforce_ignore_tasks(cfgData, targets) 446*4882a593Smuzhiyun self.setscene_ignore_tasks_checked = False 447*4882a593Smuzhiyun self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1") 448*4882a593Smuzhiyun self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter() 449*4882a593Smuzhiyun 450*4882a593Smuzhiyun self.reset() 451*4882a593Smuzhiyun 452*4882a593Smuzhiyun def reset(self): 453*4882a593Smuzhiyun self.runtaskentries = {} 454*4882a593Smuzhiyun 455*4882a593Smuzhiyun def runq_depends_names(self, ids): 456*4882a593Smuzhiyun import re 457*4882a593Smuzhiyun ret = [] 458*4882a593Smuzhiyun for id in ids: 459*4882a593Smuzhiyun nam = os.path.basename(id) 460*4882a593Smuzhiyun nam = re.sub("_[^,]*,", ",", nam) 461*4882a593Smuzhiyun ret.extend([nam]) 462*4882a593Smuzhiyun return ret 463*4882a593Smuzhiyun 464*4882a593Smuzhiyun def get_task_hash(self, tid): 465*4882a593Smuzhiyun return self.runtaskentries[tid].hash 466*4882a593Smuzhiyun 467*4882a593Smuzhiyun def get_task_unihash(self, tid): 468*4882a593Smuzhiyun return self.runtaskentries[tid].unihash 469*4882a593Smuzhiyun 470*4882a593Smuzhiyun def get_user_idstring(self, tid, task_name_suffix = ""): 471*4882a593Smuzhiyun return tid + task_name_suffix 472*4882a593Smuzhiyun 473*4882a593Smuzhiyun def get_short_user_idstring(self, task, task_name_suffix = ""): 474*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(task) 475*4882a593Smuzhiyun pn = self.dataCaches[mc].pkg_fn[taskfn] 476*4882a593Smuzhiyun taskname = taskname_from_tid(task) + task_name_suffix 477*4882a593Smuzhiyun return "%s:%s" % (pn, taskname) 478*4882a593Smuzhiyun 479*4882a593Smuzhiyun def circular_depchains_handler(self, tasks): 480*4882a593Smuzhiyun """ 481*4882a593Smuzhiyun Some tasks aren't buildable, likely due to circular dependency issues. 482*4882a593Smuzhiyun Identify the circular dependencies and print them in a user readable format. 483*4882a593Smuzhiyun """ 484*4882a593Smuzhiyun from copy import deepcopy 485*4882a593Smuzhiyun 486*4882a593Smuzhiyun valid_chains = [] 487*4882a593Smuzhiyun explored_deps = {} 488*4882a593Smuzhiyun msgs = [] 489*4882a593Smuzhiyun 490*4882a593Smuzhiyun class TooManyLoops(Exception): 491*4882a593Smuzhiyun pass 492*4882a593Smuzhiyun 493*4882a593Smuzhiyun def chain_reorder(chain): 494*4882a593Smuzhiyun """ 495*4882a593Smuzhiyun Reorder a dependency chain so the lowest task id is first 496*4882a593Smuzhiyun """ 497*4882a593Smuzhiyun lowest = 0 498*4882a593Smuzhiyun new_chain = [] 499*4882a593Smuzhiyun for entry in range(len(chain)): 500*4882a593Smuzhiyun if chain[entry] < chain[lowest]: 501*4882a593Smuzhiyun lowest = entry 502*4882a593Smuzhiyun new_chain.extend(chain[lowest:]) 503*4882a593Smuzhiyun new_chain.extend(chain[:lowest]) 504*4882a593Smuzhiyun return new_chain 505*4882a593Smuzhiyun 506*4882a593Smuzhiyun def chain_compare_equal(chain1, chain2): 507*4882a593Smuzhiyun """ 508*4882a593Smuzhiyun Compare two dependency chains and see if they're the same 509*4882a593Smuzhiyun """ 510*4882a593Smuzhiyun if len(chain1) != len(chain2): 511*4882a593Smuzhiyun return False 512*4882a593Smuzhiyun for index in range(len(chain1)): 513*4882a593Smuzhiyun if chain1[index] != chain2[index]: 514*4882a593Smuzhiyun return False 515*4882a593Smuzhiyun return True 516*4882a593Smuzhiyun 517*4882a593Smuzhiyun def chain_array_contains(chain, chain_array): 518*4882a593Smuzhiyun """ 519*4882a593Smuzhiyun Return True if chain_array contains chain 520*4882a593Smuzhiyun """ 521*4882a593Smuzhiyun for ch in chain_array: 522*4882a593Smuzhiyun if chain_compare_equal(ch, chain): 523*4882a593Smuzhiyun return True 524*4882a593Smuzhiyun return False 525*4882a593Smuzhiyun 526*4882a593Smuzhiyun def find_chains(tid, prev_chain): 527*4882a593Smuzhiyun prev_chain.append(tid) 528*4882a593Smuzhiyun total_deps = [] 529*4882a593Smuzhiyun total_deps.extend(self.runtaskentries[tid].revdeps) 530*4882a593Smuzhiyun for revdep in self.runtaskentries[tid].revdeps: 531*4882a593Smuzhiyun if revdep in prev_chain: 532*4882a593Smuzhiyun idx = prev_chain.index(revdep) 533*4882a593Smuzhiyun # To prevent duplicates, reorder the chain to start with the lowest taskid 534*4882a593Smuzhiyun # and search through an array of those we've already printed 535*4882a593Smuzhiyun chain = prev_chain[idx:] 536*4882a593Smuzhiyun new_chain = chain_reorder(chain) 537*4882a593Smuzhiyun if not chain_array_contains(new_chain, valid_chains): 538*4882a593Smuzhiyun valid_chains.append(new_chain) 539*4882a593Smuzhiyun msgs.append("Dependency loop #%d found:\n" % len(valid_chains)) 540*4882a593Smuzhiyun for dep in new_chain: 541*4882a593Smuzhiyun msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends))) 542*4882a593Smuzhiyun msgs.append("\n") 543*4882a593Smuzhiyun if len(valid_chains) > 10: 544*4882a593Smuzhiyun msgs.append("Halted dependency loops search after 10 matches.\n") 545*4882a593Smuzhiyun raise TooManyLoops 546*4882a593Smuzhiyun continue 547*4882a593Smuzhiyun scan = False 548*4882a593Smuzhiyun if revdep not in explored_deps: 549*4882a593Smuzhiyun scan = True 550*4882a593Smuzhiyun elif revdep in explored_deps[revdep]: 551*4882a593Smuzhiyun scan = True 552*4882a593Smuzhiyun else: 553*4882a593Smuzhiyun for dep in prev_chain: 554*4882a593Smuzhiyun if dep in explored_deps[revdep]: 555*4882a593Smuzhiyun scan = True 556*4882a593Smuzhiyun if scan: 557*4882a593Smuzhiyun find_chains(revdep, copy.deepcopy(prev_chain)) 558*4882a593Smuzhiyun for dep in explored_deps[revdep]: 559*4882a593Smuzhiyun if dep not in total_deps: 560*4882a593Smuzhiyun total_deps.append(dep) 561*4882a593Smuzhiyun 562*4882a593Smuzhiyun explored_deps[tid] = total_deps 563*4882a593Smuzhiyun 564*4882a593Smuzhiyun try: 565*4882a593Smuzhiyun for task in tasks: 566*4882a593Smuzhiyun find_chains(task, []) 567*4882a593Smuzhiyun except TooManyLoops: 568*4882a593Smuzhiyun pass 569*4882a593Smuzhiyun 570*4882a593Smuzhiyun return msgs 571*4882a593Smuzhiyun 572*4882a593Smuzhiyun def calculate_task_weights(self, endpoints): 573*4882a593Smuzhiyun """ 574*4882a593Smuzhiyun Calculate a number representing the "weight" of each task. Heavier weighted tasks 575*4882a593Smuzhiyun have more dependencies and hence should be executed sooner for maximum speed. 576*4882a593Smuzhiyun 577*4882a593Smuzhiyun This function also sanity checks the task list finding tasks that are not 578*4882a593Smuzhiyun possible to execute due to circular dependencies. 579*4882a593Smuzhiyun """ 580*4882a593Smuzhiyun 581*4882a593Smuzhiyun numTasks = len(self.runtaskentries) 582*4882a593Smuzhiyun weight = {} 583*4882a593Smuzhiyun deps_left = {} 584*4882a593Smuzhiyun task_done = {} 585*4882a593Smuzhiyun 586*4882a593Smuzhiyun for tid in self.runtaskentries: 587*4882a593Smuzhiyun task_done[tid] = False 588*4882a593Smuzhiyun weight[tid] = 1 589*4882a593Smuzhiyun deps_left[tid] = len(self.runtaskentries[tid].revdeps) 590*4882a593Smuzhiyun 591*4882a593Smuzhiyun for tid in endpoints: 592*4882a593Smuzhiyun weight[tid] = 10 593*4882a593Smuzhiyun task_done[tid] = True 594*4882a593Smuzhiyun 595*4882a593Smuzhiyun while True: 596*4882a593Smuzhiyun next_points = [] 597*4882a593Smuzhiyun for tid in endpoints: 598*4882a593Smuzhiyun for revdep in self.runtaskentries[tid].depends: 599*4882a593Smuzhiyun weight[revdep] = weight[revdep] + weight[tid] 600*4882a593Smuzhiyun deps_left[revdep] = deps_left[revdep] - 1 601*4882a593Smuzhiyun if deps_left[revdep] == 0: 602*4882a593Smuzhiyun next_points.append(revdep) 603*4882a593Smuzhiyun task_done[revdep] = True 604*4882a593Smuzhiyun endpoints = next_points 605*4882a593Smuzhiyun if not next_points: 606*4882a593Smuzhiyun break 607*4882a593Smuzhiyun 608*4882a593Smuzhiyun # Circular dependency sanity check 609*4882a593Smuzhiyun problem_tasks = [] 610*4882a593Smuzhiyun for tid in self.runtaskentries: 611*4882a593Smuzhiyun if task_done[tid] is False or deps_left[tid] != 0: 612*4882a593Smuzhiyun problem_tasks.append(tid) 613*4882a593Smuzhiyun logger.debug2("Task %s is not buildable", tid) 614*4882a593Smuzhiyun logger.debug2("(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid]) 615*4882a593Smuzhiyun self.runtaskentries[tid].weight = weight[tid] 616*4882a593Smuzhiyun 617*4882a593Smuzhiyun if problem_tasks: 618*4882a593Smuzhiyun message = "%s unbuildable tasks were found.\n" % len(problem_tasks) 619*4882a593Smuzhiyun message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n" 620*4882a593Smuzhiyun message = message + "Identifying dependency loops (this may take a short while)...\n" 621*4882a593Smuzhiyun logger.error(message) 622*4882a593Smuzhiyun 623*4882a593Smuzhiyun msgs = self.circular_depchains_handler(problem_tasks) 624*4882a593Smuzhiyun 625*4882a593Smuzhiyun message = "\n" 626*4882a593Smuzhiyun for msg in msgs: 627*4882a593Smuzhiyun message = message + msg 628*4882a593Smuzhiyun bb.msg.fatal("RunQueue", message) 629*4882a593Smuzhiyun 630*4882a593Smuzhiyun return weight 631*4882a593Smuzhiyun 632*4882a593Smuzhiyun def prepare(self): 633*4882a593Smuzhiyun """ 634*4882a593Smuzhiyun Turn a set of taskData into a RunQueue and compute data needed 635*4882a593Smuzhiyun to optimise the execution order. 636*4882a593Smuzhiyun """ 637*4882a593Smuzhiyun 638*4882a593Smuzhiyun runq_build = {} 639*4882a593Smuzhiyun recursivetasks = {} 640*4882a593Smuzhiyun recursiveitasks = {} 641*4882a593Smuzhiyun recursivetasksselfref = set() 642*4882a593Smuzhiyun 643*4882a593Smuzhiyun taskData = self.taskData 644*4882a593Smuzhiyun 645*4882a593Smuzhiyun found = False 646*4882a593Smuzhiyun for mc in self.taskData: 647*4882a593Smuzhiyun if taskData[mc].taskentries: 648*4882a593Smuzhiyun found = True 649*4882a593Smuzhiyun break 650*4882a593Smuzhiyun if not found: 651*4882a593Smuzhiyun # Nothing to do 652*4882a593Smuzhiyun return 0 653*4882a593Smuzhiyun 654*4882a593Smuzhiyun self.init_progress_reporter.start() 655*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 656*4882a593Smuzhiyun 657*4882a593Smuzhiyun # Step A - Work out a list of tasks to run 658*4882a593Smuzhiyun # 659*4882a593Smuzhiyun # Taskdata gives us a list of possible providers for every build and run 660*4882a593Smuzhiyun # target ordered by priority. It also gives information on each of those 661*4882a593Smuzhiyun # providers. 662*4882a593Smuzhiyun # 663*4882a593Smuzhiyun # To create the actual list of tasks to execute we fix the list of 664*4882a593Smuzhiyun # providers and then resolve the dependencies into task IDs. This 665*4882a593Smuzhiyun # process is repeated for each type of dependency (tdepends, deptask, 666*4882a593Smuzhiyun # rdeptast, recrdeptask, idepends). 667*4882a593Smuzhiyun 668*4882a593Smuzhiyun def add_build_dependencies(depids, tasknames, depends, mc): 669*4882a593Smuzhiyun for depname in depids: 670*4882a593Smuzhiyun # Won't be in build_targets if ASSUME_PROVIDED 671*4882a593Smuzhiyun if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]: 672*4882a593Smuzhiyun continue 673*4882a593Smuzhiyun depdata = taskData[mc].build_targets[depname][0] 674*4882a593Smuzhiyun if depdata is None: 675*4882a593Smuzhiyun continue 676*4882a593Smuzhiyun for taskname in tasknames: 677*4882a593Smuzhiyun t = depdata + ":" + taskname 678*4882a593Smuzhiyun if t in taskData[mc].taskentries: 679*4882a593Smuzhiyun depends.add(t) 680*4882a593Smuzhiyun 681*4882a593Smuzhiyun def add_runtime_dependencies(depids, tasknames, depends, mc): 682*4882a593Smuzhiyun for depname in depids: 683*4882a593Smuzhiyun if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]: 684*4882a593Smuzhiyun continue 685*4882a593Smuzhiyun depdata = taskData[mc].run_targets[depname][0] 686*4882a593Smuzhiyun if depdata is None: 687*4882a593Smuzhiyun continue 688*4882a593Smuzhiyun for taskname in tasknames: 689*4882a593Smuzhiyun t = depdata + ":" + taskname 690*4882a593Smuzhiyun if t in taskData[mc].taskentries: 691*4882a593Smuzhiyun depends.add(t) 692*4882a593Smuzhiyun 693*4882a593Smuzhiyun def add_mc_dependencies(mc, tid): 694*4882a593Smuzhiyun mcdeps = taskData[mc].get_mcdepends() 695*4882a593Smuzhiyun for dep in mcdeps: 696*4882a593Smuzhiyun mcdependency = dep.split(':') 697*4882a593Smuzhiyun pn = mcdependency[3] 698*4882a593Smuzhiyun frommc = mcdependency[1] 699*4882a593Smuzhiyun mcdep = mcdependency[2] 700*4882a593Smuzhiyun deptask = mcdependency[4] 701*4882a593Smuzhiyun if mc == frommc: 702*4882a593Smuzhiyun fn = taskData[mcdep].build_targets[pn][0] 703*4882a593Smuzhiyun newdep = '%s:%s' % (fn,deptask) 704*4882a593Smuzhiyun taskData[mc].taskentries[tid].tdepends.append(newdep) 705*4882a593Smuzhiyun 706*4882a593Smuzhiyun for mc in taskData: 707*4882a593Smuzhiyun for tid in taskData[mc].taskentries: 708*4882a593Smuzhiyun 709*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 710*4882a593Smuzhiyun #runtid = build_tid(mc, fn, taskname) 711*4882a593Smuzhiyun 712*4882a593Smuzhiyun #logger.debug2("Processing %s,%s:%s", mc, fn, taskname) 713*4882a593Smuzhiyun 714*4882a593Smuzhiyun depends = set() 715*4882a593Smuzhiyun task_deps = self.dataCaches[mc].task_deps[taskfn] 716*4882a593Smuzhiyun 717*4882a593Smuzhiyun self.runtaskentries[tid] = RunTaskEntry() 718*4882a593Smuzhiyun 719*4882a593Smuzhiyun if fn in taskData[mc].failed_fns: 720*4882a593Smuzhiyun continue 721*4882a593Smuzhiyun 722*4882a593Smuzhiyun # We add multiconfig dependencies before processing internal task deps (tdepends) 723*4882a593Smuzhiyun if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']: 724*4882a593Smuzhiyun add_mc_dependencies(mc, tid) 725*4882a593Smuzhiyun 726*4882a593Smuzhiyun # Resolve task internal dependencies 727*4882a593Smuzhiyun # 728*4882a593Smuzhiyun # e.g. addtask before X after Y 729*4882a593Smuzhiyun for t in taskData[mc].taskentries[tid].tdepends: 730*4882a593Smuzhiyun (depmc, depfn, deptaskname, _) = split_tid_mcfn(t) 731*4882a593Smuzhiyun depends.add(build_tid(depmc, depfn, deptaskname)) 732*4882a593Smuzhiyun 733*4882a593Smuzhiyun # Resolve 'deptask' dependencies 734*4882a593Smuzhiyun # 735*4882a593Smuzhiyun # e.g. do_sometask[deptask] = "do_someothertask" 736*4882a593Smuzhiyun # (makes sure sometask runs after someothertask of all DEPENDS) 737*4882a593Smuzhiyun if 'deptask' in task_deps and taskname in task_deps['deptask']: 738*4882a593Smuzhiyun tasknames = task_deps['deptask'][taskname].split() 739*4882a593Smuzhiyun add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc) 740*4882a593Smuzhiyun 741*4882a593Smuzhiyun # Resolve 'rdeptask' dependencies 742*4882a593Smuzhiyun # 743*4882a593Smuzhiyun # e.g. do_sometask[rdeptask] = "do_someothertask" 744*4882a593Smuzhiyun # (makes sure sometask runs after someothertask of all RDEPENDS) 745*4882a593Smuzhiyun if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']: 746*4882a593Smuzhiyun tasknames = task_deps['rdeptask'][taskname].split() 747*4882a593Smuzhiyun add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc) 748*4882a593Smuzhiyun 749*4882a593Smuzhiyun # Resolve inter-task dependencies 750*4882a593Smuzhiyun # 751*4882a593Smuzhiyun # e.g. do_sometask[depends] = "targetname:do_someothertask" 752*4882a593Smuzhiyun # (makes sure sometask runs after targetname's someothertask) 753*4882a593Smuzhiyun idepends = taskData[mc].taskentries[tid].idepends 754*4882a593Smuzhiyun for (depname, idependtask) in idepends: 755*4882a593Smuzhiyun if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps: 756*4882a593Smuzhiyun # Won't be in build_targets if ASSUME_PROVIDED 757*4882a593Smuzhiyun depdata = taskData[mc].build_targets[depname][0] 758*4882a593Smuzhiyun if depdata is not None: 759*4882a593Smuzhiyun t = depdata + ":" + idependtask 760*4882a593Smuzhiyun depends.add(t) 761*4882a593Smuzhiyun if t not in taskData[mc].taskentries: 762*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) 763*4882a593Smuzhiyun irdepends = taskData[mc].taskentries[tid].irdepends 764*4882a593Smuzhiyun for (depname, idependtask) in irdepends: 765*4882a593Smuzhiyun if depname in taskData[mc].run_targets: 766*4882a593Smuzhiyun # Won't be in run_targets if ASSUME_PROVIDED 767*4882a593Smuzhiyun if not taskData[mc].run_targets[depname]: 768*4882a593Smuzhiyun continue 769*4882a593Smuzhiyun depdata = taskData[mc].run_targets[depname][0] 770*4882a593Smuzhiyun if depdata is not None: 771*4882a593Smuzhiyun t = depdata + ":" + idependtask 772*4882a593Smuzhiyun depends.add(t) 773*4882a593Smuzhiyun if t not in taskData[mc].taskentries: 774*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) 775*4882a593Smuzhiyun 776*4882a593Smuzhiyun # Resolve recursive 'recrdeptask' dependencies (Part A) 777*4882a593Smuzhiyun # 778*4882a593Smuzhiyun # e.g. do_sometask[recrdeptask] = "do_someothertask" 779*4882a593Smuzhiyun # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively) 780*4882a593Smuzhiyun # We cover the recursive part of the dependencies below 781*4882a593Smuzhiyun if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']: 782*4882a593Smuzhiyun tasknames = task_deps['recrdeptask'][taskname].split() 783*4882a593Smuzhiyun recursivetasks[tid] = tasknames 784*4882a593Smuzhiyun add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc) 785*4882a593Smuzhiyun add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc) 786*4882a593Smuzhiyun if taskname in tasknames: 787*4882a593Smuzhiyun recursivetasksselfref.add(tid) 788*4882a593Smuzhiyun 789*4882a593Smuzhiyun if 'recideptask' in task_deps and taskname in task_deps['recideptask']: 790*4882a593Smuzhiyun recursiveitasks[tid] = [] 791*4882a593Smuzhiyun for t in task_deps['recideptask'][taskname].split(): 792*4882a593Smuzhiyun newdep = build_tid(mc, fn, t) 793*4882a593Smuzhiyun recursiveitasks[tid].append(newdep) 794*4882a593Smuzhiyun 795*4882a593Smuzhiyun self.runtaskentries[tid].depends = depends 796*4882a593Smuzhiyun # Remove all self references 797*4882a593Smuzhiyun self.runtaskentries[tid].depends.discard(tid) 798*4882a593Smuzhiyun 799*4882a593Smuzhiyun #self.dump_data() 800*4882a593Smuzhiyun 801*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 802*4882a593Smuzhiyun 803*4882a593Smuzhiyun # Resolve recursive 'recrdeptask' dependencies (Part B) 804*4882a593Smuzhiyun # 805*4882a593Smuzhiyun # e.g. do_sometask[recrdeptask] = "do_someothertask" 806*4882a593Smuzhiyun # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively) 807*4882a593Smuzhiyun # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed 808*4882a593Smuzhiyun 809*4882a593Smuzhiyun # Generating/interating recursive lists of dependencies is painful and potentially slow 810*4882a593Smuzhiyun # Precompute recursive task dependencies here by: 811*4882a593Smuzhiyun # a) create a temp list of reverse dependencies (revdeps) 812*4882a593Smuzhiyun # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0) 813*4882a593Smuzhiyun # c) combine the total list of dependencies in cumulativedeps 814*4882a593Smuzhiyun # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower) 815*4882a593Smuzhiyun 816*4882a593Smuzhiyun 817*4882a593Smuzhiyun revdeps = {} 818*4882a593Smuzhiyun deps = {} 819*4882a593Smuzhiyun cumulativedeps = {} 820*4882a593Smuzhiyun for tid in self.runtaskentries: 821*4882a593Smuzhiyun deps[tid] = set(self.runtaskentries[tid].depends) 822*4882a593Smuzhiyun revdeps[tid] = set() 823*4882a593Smuzhiyun cumulativedeps[tid] = set() 824*4882a593Smuzhiyun # Generate a temp list of reverse dependencies 825*4882a593Smuzhiyun for tid in self.runtaskentries: 826*4882a593Smuzhiyun for dep in self.runtaskentries[tid].depends: 827*4882a593Smuzhiyun revdeps[dep].add(tid) 828*4882a593Smuzhiyun # Find the dependency chain endpoints 829*4882a593Smuzhiyun endpoints = set() 830*4882a593Smuzhiyun for tid in self.runtaskentries: 831*4882a593Smuzhiyun if not deps[tid]: 832*4882a593Smuzhiyun endpoints.add(tid) 833*4882a593Smuzhiyun # Iterate the chains collating dependencies 834*4882a593Smuzhiyun while endpoints: 835*4882a593Smuzhiyun next = set() 836*4882a593Smuzhiyun for tid in endpoints: 837*4882a593Smuzhiyun for dep in revdeps[tid]: 838*4882a593Smuzhiyun cumulativedeps[dep].add(fn_from_tid(tid)) 839*4882a593Smuzhiyun cumulativedeps[dep].update(cumulativedeps[tid]) 840*4882a593Smuzhiyun if tid in deps[dep]: 841*4882a593Smuzhiyun deps[dep].remove(tid) 842*4882a593Smuzhiyun if not deps[dep]: 843*4882a593Smuzhiyun next.add(dep) 844*4882a593Smuzhiyun endpoints = next 845*4882a593Smuzhiyun #for tid in deps: 846*4882a593Smuzhiyun # if deps[tid]: 847*4882a593Smuzhiyun # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid])) 848*4882a593Smuzhiyun 849*4882a593Smuzhiyun # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to 850*4882a593Smuzhiyun # resolve these recursively until we aren't adding any further extra dependencies 851*4882a593Smuzhiyun extradeps = True 852*4882a593Smuzhiyun while extradeps: 853*4882a593Smuzhiyun extradeps = 0 854*4882a593Smuzhiyun for tid in recursivetasks: 855*4882a593Smuzhiyun tasknames = recursivetasks[tid] 856*4882a593Smuzhiyun 857*4882a593Smuzhiyun totaldeps = set(self.runtaskentries[tid].depends) 858*4882a593Smuzhiyun if tid in recursiveitasks: 859*4882a593Smuzhiyun totaldeps.update(recursiveitasks[tid]) 860*4882a593Smuzhiyun for dep in recursiveitasks[tid]: 861*4882a593Smuzhiyun if dep not in self.runtaskentries: 862*4882a593Smuzhiyun continue 863*4882a593Smuzhiyun totaldeps.update(self.runtaskentries[dep].depends) 864*4882a593Smuzhiyun 865*4882a593Smuzhiyun deps = set() 866*4882a593Smuzhiyun for dep in totaldeps: 867*4882a593Smuzhiyun if dep in cumulativedeps: 868*4882a593Smuzhiyun deps.update(cumulativedeps[dep]) 869*4882a593Smuzhiyun 870*4882a593Smuzhiyun for t in deps: 871*4882a593Smuzhiyun for taskname in tasknames: 872*4882a593Smuzhiyun newtid = t + ":" + taskname 873*4882a593Smuzhiyun if newtid == tid: 874*4882a593Smuzhiyun continue 875*4882a593Smuzhiyun if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends: 876*4882a593Smuzhiyun extradeps += 1 877*4882a593Smuzhiyun self.runtaskentries[tid].depends.add(newtid) 878*4882a593Smuzhiyun 879*4882a593Smuzhiyun # Handle recursive tasks which depend upon other recursive tasks 880*4882a593Smuzhiyun deps = set() 881*4882a593Smuzhiyun for dep in self.runtaskentries[tid].depends.intersection(recursivetasks): 882*4882a593Smuzhiyun deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends)) 883*4882a593Smuzhiyun for newtid in deps: 884*4882a593Smuzhiyun for taskname in tasknames: 885*4882a593Smuzhiyun if not newtid.endswith(":" + taskname): 886*4882a593Smuzhiyun continue 887*4882a593Smuzhiyun if newtid in self.runtaskentries: 888*4882a593Smuzhiyun extradeps += 1 889*4882a593Smuzhiyun self.runtaskentries[tid].depends.add(newtid) 890*4882a593Smuzhiyun 891*4882a593Smuzhiyun bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps) 892*4882a593Smuzhiyun 893*4882a593Smuzhiyun # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work 894*4882a593Smuzhiyun for tid in recursivetasksselfref: 895*4882a593Smuzhiyun self.runtaskentries[tid].depends.difference_update(recursivetasksselfref) 896*4882a593Smuzhiyun 897*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 898*4882a593Smuzhiyun 899*4882a593Smuzhiyun #self.dump_data() 900*4882a593Smuzhiyun 901*4882a593Smuzhiyun # Step B - Mark all active tasks 902*4882a593Smuzhiyun # 903*4882a593Smuzhiyun # Start with the tasks we were asked to run and mark all dependencies 904*4882a593Smuzhiyun # as active too. If the task is to be 'forced', clear its stamp. Once 905*4882a593Smuzhiyun # all active tasks are marked, prune the ones we don't need. 906*4882a593Smuzhiyun 907*4882a593Smuzhiyun logger.verbose("Marking Active Tasks") 908*4882a593Smuzhiyun 909*4882a593Smuzhiyun def mark_active(tid, depth): 910*4882a593Smuzhiyun """ 911*4882a593Smuzhiyun Mark an item as active along with its depends 912*4882a593Smuzhiyun (calls itself recursively) 913*4882a593Smuzhiyun """ 914*4882a593Smuzhiyun 915*4882a593Smuzhiyun if tid in runq_build: 916*4882a593Smuzhiyun return 917*4882a593Smuzhiyun 918*4882a593Smuzhiyun runq_build[tid] = 1 919*4882a593Smuzhiyun 920*4882a593Smuzhiyun depends = self.runtaskentries[tid].depends 921*4882a593Smuzhiyun for depend in depends: 922*4882a593Smuzhiyun mark_active(depend, depth+1) 923*4882a593Smuzhiyun 924*4882a593Smuzhiyun def invalidate_task(tid, error_nostamp): 925*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 926*4882a593Smuzhiyun taskdep = self.dataCaches[mc].task_deps[taskfn] 927*4882a593Smuzhiyun if fn + ":" + taskname not in taskData[mc].taskentries: 928*4882a593Smuzhiyun logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname) 929*4882a593Smuzhiyun if 'nostamp' in taskdep and taskname in taskdep['nostamp']: 930*4882a593Smuzhiyun if error_nostamp: 931*4882a593Smuzhiyun bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname) 932*4882a593Smuzhiyun else: 933*4882a593Smuzhiyun bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) 934*4882a593Smuzhiyun else: 935*4882a593Smuzhiyun logger.verbose("Invalidate task %s, %s", taskname, fn) 936*4882a593Smuzhiyun bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn) 937*4882a593Smuzhiyun 938*4882a593Smuzhiyun self.target_tids = [] 939*4882a593Smuzhiyun for (mc, target, task, fn) in self.targets: 940*4882a593Smuzhiyun 941*4882a593Smuzhiyun if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]: 942*4882a593Smuzhiyun continue 943*4882a593Smuzhiyun 944*4882a593Smuzhiyun if target in taskData[mc].failed_deps: 945*4882a593Smuzhiyun continue 946*4882a593Smuzhiyun 947*4882a593Smuzhiyun parents = False 948*4882a593Smuzhiyun if task.endswith('-'): 949*4882a593Smuzhiyun parents = True 950*4882a593Smuzhiyun task = task[:-1] 951*4882a593Smuzhiyun 952*4882a593Smuzhiyun if fn in taskData[mc].failed_fns: 953*4882a593Smuzhiyun continue 954*4882a593Smuzhiyun 955*4882a593Smuzhiyun # fn already has mc prefix 956*4882a593Smuzhiyun tid = fn + ":" + task 957*4882a593Smuzhiyun self.target_tids.append(tid) 958*4882a593Smuzhiyun if tid not in taskData[mc].taskentries: 959*4882a593Smuzhiyun import difflib 960*4882a593Smuzhiyun tasks = [] 961*4882a593Smuzhiyun for x in taskData[mc].taskentries: 962*4882a593Smuzhiyun if x.startswith(fn + ":"): 963*4882a593Smuzhiyun tasks.append(taskname_from_tid(x)) 964*4882a593Smuzhiyun close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7) 965*4882a593Smuzhiyun if close_matches: 966*4882a593Smuzhiyun extra = ". Close matches:\n %s" % "\n ".join(close_matches) 967*4882a593Smuzhiyun else: 968*4882a593Smuzhiyun extra = "" 969*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra)) 970*4882a593Smuzhiyun 971*4882a593Smuzhiyun # For tasks called "XXXX-", ony run their dependencies 972*4882a593Smuzhiyun if parents: 973*4882a593Smuzhiyun for i in self.runtaskentries[tid].depends: 974*4882a593Smuzhiyun mark_active(i, 1) 975*4882a593Smuzhiyun else: 976*4882a593Smuzhiyun mark_active(tid, 1) 977*4882a593Smuzhiyun 978*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 979*4882a593Smuzhiyun 980*4882a593Smuzhiyun # Step C - Prune all inactive tasks 981*4882a593Smuzhiyun # 982*4882a593Smuzhiyun # Once all active tasks are marked, prune the ones we don't need. 983*4882a593Smuzhiyun 984*4882a593Smuzhiyun # Handle --runall 985*4882a593Smuzhiyun if self.cooker.configuration.runall: 986*4882a593Smuzhiyun # re-run the mark_active and then drop unused tasks from new list 987*4882a593Smuzhiyun reduced_tasklist = set(self.runtaskentries.keys()) 988*4882a593Smuzhiyun for tid in list(self.runtaskentries.keys()): 989*4882a593Smuzhiyun if tid not in runq_build: 990*4882a593Smuzhiyun reduced_tasklist.remove(tid) 991*4882a593Smuzhiyun runq_build = {} 992*4882a593Smuzhiyun 993*4882a593Smuzhiyun for task in self.cooker.configuration.runall: 994*4882a593Smuzhiyun if not task.startswith("do_"): 995*4882a593Smuzhiyun task = "do_{0}".format(task) 996*4882a593Smuzhiyun runall_tids = set() 997*4882a593Smuzhiyun for tid in reduced_tasklist: 998*4882a593Smuzhiyun wanttid = "{0}:{1}".format(fn_from_tid(tid), task) 999*4882a593Smuzhiyun if wanttid in self.runtaskentries: 1000*4882a593Smuzhiyun runall_tids.add(wanttid) 1001*4882a593Smuzhiyun 1002*4882a593Smuzhiyun for tid in list(runall_tids): 1003*4882a593Smuzhiyun mark_active(tid, 1) 1004*4882a593Smuzhiyun if self.cooker.configuration.force: 1005*4882a593Smuzhiyun invalidate_task(tid, False) 1006*4882a593Smuzhiyun 1007*4882a593Smuzhiyun delcount = set() 1008*4882a593Smuzhiyun for tid in list(self.runtaskentries.keys()): 1009*4882a593Smuzhiyun if tid not in runq_build: 1010*4882a593Smuzhiyun delcount.add(tid) 1011*4882a593Smuzhiyun del self.runtaskentries[tid] 1012*4882a593Smuzhiyun 1013*4882a593Smuzhiyun if self.cooker.configuration.runall: 1014*4882a593Smuzhiyun if not self.runtaskentries: 1015*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets))) 1016*4882a593Smuzhiyun 1017*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1018*4882a593Smuzhiyun 1019*4882a593Smuzhiyun # Handle runonly 1020*4882a593Smuzhiyun if self.cooker.configuration.runonly: 1021*4882a593Smuzhiyun # re-run the mark_active and then drop unused tasks from new list 1022*4882a593Smuzhiyun runq_build = {} 1023*4882a593Smuzhiyun 1024*4882a593Smuzhiyun for task in self.cooker.configuration.runonly: 1025*4882a593Smuzhiyun if not task.startswith("do_"): 1026*4882a593Smuzhiyun task = "do_{0}".format(task) 1027*4882a593Smuzhiyun runonly_tids = [k for k in self.runtaskentries.keys() if taskname_from_tid(k) == task] 1028*4882a593Smuzhiyun 1029*4882a593Smuzhiyun for tid in runonly_tids: 1030*4882a593Smuzhiyun mark_active(tid, 1) 1031*4882a593Smuzhiyun if self.cooker.configuration.force: 1032*4882a593Smuzhiyun invalidate_task(tid, False) 1033*4882a593Smuzhiyun 1034*4882a593Smuzhiyun for tid in list(self.runtaskentries.keys()): 1035*4882a593Smuzhiyun if tid not in runq_build: 1036*4882a593Smuzhiyun delcount.add(tid) 1037*4882a593Smuzhiyun del self.runtaskentries[tid] 1038*4882a593Smuzhiyun 1039*4882a593Smuzhiyun if not self.runtaskentries: 1040*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets))) 1041*4882a593Smuzhiyun 1042*4882a593Smuzhiyun # 1043*4882a593Smuzhiyun # Step D - Sanity checks and computation 1044*4882a593Smuzhiyun # 1045*4882a593Smuzhiyun 1046*4882a593Smuzhiyun # Check to make sure we still have tasks to run 1047*4882a593Smuzhiyun if not self.runtaskentries: 1048*4882a593Smuzhiyun if not taskData[''].halt: 1049*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") 1050*4882a593Smuzhiyun else: 1051*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") 1052*4882a593Smuzhiyun 1053*4882a593Smuzhiyun logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries)) 1054*4882a593Smuzhiyun 1055*4882a593Smuzhiyun logger.verbose("Assign Weightings") 1056*4882a593Smuzhiyun 1057*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1058*4882a593Smuzhiyun 1059*4882a593Smuzhiyun # Generate a list of reverse dependencies to ease future calculations 1060*4882a593Smuzhiyun for tid in self.runtaskentries: 1061*4882a593Smuzhiyun for dep in self.runtaskentries[tid].depends: 1062*4882a593Smuzhiyun self.runtaskentries[dep].revdeps.add(tid) 1063*4882a593Smuzhiyun 1064*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1065*4882a593Smuzhiyun 1066*4882a593Smuzhiyun # Identify tasks at the end of dependency chains 1067*4882a593Smuzhiyun # Error on circular dependency loops (length two) 1068*4882a593Smuzhiyun endpoints = [] 1069*4882a593Smuzhiyun for tid in self.runtaskentries: 1070*4882a593Smuzhiyun revdeps = self.runtaskentries[tid].revdeps 1071*4882a593Smuzhiyun if not revdeps: 1072*4882a593Smuzhiyun endpoints.append(tid) 1073*4882a593Smuzhiyun for dep in revdeps: 1074*4882a593Smuzhiyun if dep in self.runtaskentries[tid].depends: 1075*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep)) 1076*4882a593Smuzhiyun 1077*4882a593Smuzhiyun 1078*4882a593Smuzhiyun logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints)) 1079*4882a593Smuzhiyun 1080*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1081*4882a593Smuzhiyun 1082*4882a593Smuzhiyun # Calculate task weights 1083*4882a593Smuzhiyun # Check of higher length circular dependencies 1084*4882a593Smuzhiyun self.runq_weight = self.calculate_task_weights(endpoints) 1085*4882a593Smuzhiyun 1086*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1087*4882a593Smuzhiyun 1088*4882a593Smuzhiyun # Sanity Check - Check for multiple tasks building the same provider 1089*4882a593Smuzhiyun for mc in self.dataCaches: 1090*4882a593Smuzhiyun prov_list = {} 1091*4882a593Smuzhiyun seen_fn = [] 1092*4882a593Smuzhiyun for tid in self.runtaskentries: 1093*4882a593Smuzhiyun (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid) 1094*4882a593Smuzhiyun if taskfn in seen_fn: 1095*4882a593Smuzhiyun continue 1096*4882a593Smuzhiyun if mc != tidmc: 1097*4882a593Smuzhiyun continue 1098*4882a593Smuzhiyun seen_fn.append(taskfn) 1099*4882a593Smuzhiyun for prov in self.dataCaches[mc].fn_provides[taskfn]: 1100*4882a593Smuzhiyun if prov not in prov_list: 1101*4882a593Smuzhiyun prov_list[prov] = [taskfn] 1102*4882a593Smuzhiyun elif taskfn not in prov_list[prov]: 1103*4882a593Smuzhiyun prov_list[prov].append(taskfn) 1104*4882a593Smuzhiyun for prov in prov_list: 1105*4882a593Smuzhiyun if len(prov_list[prov]) < 2: 1106*4882a593Smuzhiyun continue 1107*4882a593Smuzhiyun if prov in self.multi_provider_allowed: 1108*4882a593Smuzhiyun continue 1109*4882a593Smuzhiyun seen_pn = [] 1110*4882a593Smuzhiyun # If two versions of the same PN are being built its fatal, we don't support it. 1111*4882a593Smuzhiyun for fn in prov_list[prov]: 1112*4882a593Smuzhiyun pn = self.dataCaches[mc].pkg_fn[fn] 1113*4882a593Smuzhiyun if pn not in seen_pn: 1114*4882a593Smuzhiyun seen_pn.append(pn) 1115*4882a593Smuzhiyun else: 1116*4882a593Smuzhiyun bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn)) 1117*4882a593Smuzhiyun msgs = ["Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))] 1118*4882a593Smuzhiyun # 1119*4882a593Smuzhiyun # Construct a list of things which uniquely depend on each provider 1120*4882a593Smuzhiyun # since this may help the user figure out which dependency is triggering this warning 1121*4882a593Smuzhiyun # 1122*4882a593Smuzhiyun msgs.append("\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from.") 1123*4882a593Smuzhiyun deplist = {} 1124*4882a593Smuzhiyun commondeps = None 1125*4882a593Smuzhiyun for provfn in prov_list[prov]: 1126*4882a593Smuzhiyun deps = set() 1127*4882a593Smuzhiyun for tid in self.runtaskentries: 1128*4882a593Smuzhiyun fn = fn_from_tid(tid) 1129*4882a593Smuzhiyun if fn != provfn: 1130*4882a593Smuzhiyun continue 1131*4882a593Smuzhiyun for dep in self.runtaskentries[tid].revdeps: 1132*4882a593Smuzhiyun fn = fn_from_tid(dep) 1133*4882a593Smuzhiyun if fn == provfn: 1134*4882a593Smuzhiyun continue 1135*4882a593Smuzhiyun deps.add(dep) 1136*4882a593Smuzhiyun if not commondeps: 1137*4882a593Smuzhiyun commondeps = set(deps) 1138*4882a593Smuzhiyun else: 1139*4882a593Smuzhiyun commondeps &= deps 1140*4882a593Smuzhiyun deplist[provfn] = deps 1141*4882a593Smuzhiyun for provfn in deplist: 1142*4882a593Smuzhiyun msgs.append("\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))) 1143*4882a593Smuzhiyun # 1144*4882a593Smuzhiyun # Construct a list of provides and runtime providers for each recipe 1145*4882a593Smuzhiyun # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC) 1146*4882a593Smuzhiyun # 1147*4882a593Smuzhiyun msgs.append("\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful.") 1148*4882a593Smuzhiyun provide_results = {} 1149*4882a593Smuzhiyun rprovide_results = {} 1150*4882a593Smuzhiyun commonprovs = None 1151*4882a593Smuzhiyun commonrprovs = None 1152*4882a593Smuzhiyun for provfn in prov_list[prov]: 1153*4882a593Smuzhiyun provides = set(self.dataCaches[mc].fn_provides[provfn]) 1154*4882a593Smuzhiyun rprovides = set() 1155*4882a593Smuzhiyun for rprovide in self.dataCaches[mc].rproviders: 1156*4882a593Smuzhiyun if provfn in self.dataCaches[mc].rproviders[rprovide]: 1157*4882a593Smuzhiyun rprovides.add(rprovide) 1158*4882a593Smuzhiyun for package in self.dataCaches[mc].packages: 1159*4882a593Smuzhiyun if provfn in self.dataCaches[mc].packages[package]: 1160*4882a593Smuzhiyun rprovides.add(package) 1161*4882a593Smuzhiyun for package in self.dataCaches[mc].packages_dynamic: 1162*4882a593Smuzhiyun if provfn in self.dataCaches[mc].packages_dynamic[package]: 1163*4882a593Smuzhiyun rprovides.add(package) 1164*4882a593Smuzhiyun if not commonprovs: 1165*4882a593Smuzhiyun commonprovs = set(provides) 1166*4882a593Smuzhiyun else: 1167*4882a593Smuzhiyun commonprovs &= provides 1168*4882a593Smuzhiyun provide_results[provfn] = provides 1169*4882a593Smuzhiyun if not commonrprovs: 1170*4882a593Smuzhiyun commonrprovs = set(rprovides) 1171*4882a593Smuzhiyun else: 1172*4882a593Smuzhiyun commonrprovs &= rprovides 1173*4882a593Smuzhiyun rprovide_results[provfn] = rprovides 1174*4882a593Smuzhiyun #msgs.append("\nCommon provides:\n %s" % ("\n ".join(commonprovs))) 1175*4882a593Smuzhiyun #msgs.append("\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))) 1176*4882a593Smuzhiyun for provfn in prov_list[prov]: 1177*4882a593Smuzhiyun msgs.append("\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))) 1178*4882a593Smuzhiyun msgs.append("\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))) 1179*4882a593Smuzhiyun 1180*4882a593Smuzhiyun if self.warn_multi_bb: 1181*4882a593Smuzhiyun logger.verbnote("".join(msgs)) 1182*4882a593Smuzhiyun else: 1183*4882a593Smuzhiyun logger.error("".join(msgs)) 1184*4882a593Smuzhiyun 1185*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1186*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1187*4882a593Smuzhiyun 1188*4882a593Smuzhiyun # Iterate over the task list looking for tasks with a 'setscene' function 1189*4882a593Smuzhiyun self.runq_setscene_tids = set() 1190*4882a593Smuzhiyun if not self.cooker.configuration.nosetscene: 1191*4882a593Smuzhiyun for tid in self.runtaskentries: 1192*4882a593Smuzhiyun (mc, fn, taskname, _) = split_tid_mcfn(tid) 1193*4882a593Smuzhiyun setscenetid = tid + "_setscene" 1194*4882a593Smuzhiyun if setscenetid not in taskData[mc].taskentries: 1195*4882a593Smuzhiyun continue 1196*4882a593Smuzhiyun self.runq_setscene_tids.add(tid) 1197*4882a593Smuzhiyun 1198*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1199*4882a593Smuzhiyun 1200*4882a593Smuzhiyun # Invalidate task if force mode active 1201*4882a593Smuzhiyun if self.cooker.configuration.force: 1202*4882a593Smuzhiyun for tid in self.target_tids: 1203*4882a593Smuzhiyun invalidate_task(tid, False) 1204*4882a593Smuzhiyun 1205*4882a593Smuzhiyun # Invalidate task if invalidate mode active 1206*4882a593Smuzhiyun if self.cooker.configuration.invalidate_stamp: 1207*4882a593Smuzhiyun for tid in self.target_tids: 1208*4882a593Smuzhiyun fn = fn_from_tid(tid) 1209*4882a593Smuzhiyun for st in self.cooker.configuration.invalidate_stamp.split(','): 1210*4882a593Smuzhiyun if not st.startswith("do_"): 1211*4882a593Smuzhiyun st = "do_%s" % st 1212*4882a593Smuzhiyun invalidate_task(fn + ":" + st, True) 1213*4882a593Smuzhiyun 1214*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1215*4882a593Smuzhiyun 1216*4882a593Smuzhiyun # Create and print to the logs a virtual/xxxx -> PN (fn) table 1217*4882a593Smuzhiyun for mc in taskData: 1218*4882a593Smuzhiyun virtmap = taskData[mc].get_providermap(prefix="virtual/") 1219*4882a593Smuzhiyun virtpnmap = {} 1220*4882a593Smuzhiyun for v in virtmap: 1221*4882a593Smuzhiyun virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]] 1222*4882a593Smuzhiyun bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v])) 1223*4882a593Smuzhiyun if hasattr(bb.parse.siggen, "tasks_resolved"): 1224*4882a593Smuzhiyun bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) 1225*4882a593Smuzhiyun 1226*4882a593Smuzhiyun self.init_progress_reporter.next_stage() 1227*4882a593Smuzhiyun 1228*4882a593Smuzhiyun bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids) 1229*4882a593Smuzhiyun 1230*4882a593Smuzhiyun # Iterate over the task list and call into the siggen code 1231*4882a593Smuzhiyun dealtwith = set() 1232*4882a593Smuzhiyun todeal = set(self.runtaskentries) 1233*4882a593Smuzhiyun while todeal: 1234*4882a593Smuzhiyun for tid in todeal.copy(): 1235*4882a593Smuzhiyun if not (self.runtaskentries[tid].depends - dealtwith): 1236*4882a593Smuzhiyun dealtwith.add(tid) 1237*4882a593Smuzhiyun todeal.remove(tid) 1238*4882a593Smuzhiyun self.prepare_task_hash(tid) 1239*4882a593Smuzhiyun 1240*4882a593Smuzhiyun bb.parse.siggen.writeout_file_checksum_cache() 1241*4882a593Smuzhiyun 1242*4882a593Smuzhiyun #self.dump_data() 1243*4882a593Smuzhiyun return len(self.runtaskentries) 1244*4882a593Smuzhiyun 1245*4882a593Smuzhiyun def prepare_task_hash(self, tid): 1246*4882a593Smuzhiyun dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid)) 1247*4882a593Smuzhiyun bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc) 1248*4882a593Smuzhiyun self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, dc) 1249*4882a593Smuzhiyun self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid) 1250*4882a593Smuzhiyun 1251*4882a593Smuzhiyun def dump_data(self): 1252*4882a593Smuzhiyun """ 1253*4882a593Smuzhiyun Dump some debug information on the internal data structures 1254*4882a593Smuzhiyun """ 1255*4882a593Smuzhiyun logger.debug3("run_tasks:") 1256*4882a593Smuzhiyun for tid in self.runtaskentries: 1257*4882a593Smuzhiyun logger.debug3(" %s: %s Deps %s RevDeps %s", tid, 1258*4882a593Smuzhiyun self.runtaskentries[tid].weight, 1259*4882a593Smuzhiyun self.runtaskentries[tid].depends, 1260*4882a593Smuzhiyun self.runtaskentries[tid].revdeps) 1261*4882a593Smuzhiyun 1262*4882a593Smuzhiyunclass RunQueueWorker(): 1263*4882a593Smuzhiyun def __init__(self, process, pipe): 1264*4882a593Smuzhiyun self.process = process 1265*4882a593Smuzhiyun self.pipe = pipe 1266*4882a593Smuzhiyun 1267*4882a593Smuzhiyunclass RunQueue: 1268*4882a593Smuzhiyun def __init__(self, cooker, cfgData, dataCaches, taskData, targets): 1269*4882a593Smuzhiyun 1270*4882a593Smuzhiyun self.cooker = cooker 1271*4882a593Smuzhiyun self.cfgData = cfgData 1272*4882a593Smuzhiyun self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) 1273*4882a593Smuzhiyun 1274*4882a593Smuzhiyun self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None 1275*4882a593Smuzhiyun self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None 1276*4882a593Smuzhiyun 1277*4882a593Smuzhiyun self.state = runQueuePrepare 1278*4882a593Smuzhiyun 1279*4882a593Smuzhiyun # For disk space monitor 1280*4882a593Smuzhiyun # Invoked at regular time intervals via the bitbake heartbeat event 1281*4882a593Smuzhiyun # while the build is running. We generate a unique name for the handler 1282*4882a593Smuzhiyun # here, just in case that there ever is more than one RunQueue instance, 1283*4882a593Smuzhiyun # start the handler when reaching runQueueSceneInit, and stop it when 1284*4882a593Smuzhiyun # done with the build. 1285*4882a593Smuzhiyun self.dm = monitordisk.diskMonitor(cfgData) 1286*4882a593Smuzhiyun self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self)) 1287*4882a593Smuzhiyun self.dm_event_handler_registered = False 1288*4882a593Smuzhiyun self.rqexe = None 1289*4882a593Smuzhiyun self.worker = {} 1290*4882a593Smuzhiyun self.fakeworker = {} 1291*4882a593Smuzhiyun 1292*4882a593Smuzhiyun def _start_worker(self, mc, fakeroot = False, rqexec = None): 1293*4882a593Smuzhiyun logger.debug("Starting bitbake-worker") 1294*4882a593Smuzhiyun magic = "decafbad" 1295*4882a593Smuzhiyun if self.cooker.configuration.profile: 1296*4882a593Smuzhiyun magic = "decafbadbad" 1297*4882a593Smuzhiyun fakerootlogs = None 1298*4882a593Smuzhiyun if fakeroot: 1299*4882a593Smuzhiyun magic = magic + "beef" 1300*4882a593Smuzhiyun mcdata = self.cooker.databuilder.mcdata[mc] 1301*4882a593Smuzhiyun fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD")) 1302*4882a593Smuzhiyun fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split() 1303*4882a593Smuzhiyun env = os.environ.copy() 1304*4882a593Smuzhiyun for key, value in (var.split('=') for var in fakerootenv): 1305*4882a593Smuzhiyun env[key] = value 1306*4882a593Smuzhiyun worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env) 1307*4882a593Smuzhiyun fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs 1308*4882a593Smuzhiyun else: 1309*4882a593Smuzhiyun worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE) 1310*4882a593Smuzhiyun bb.utils.nonblockingfd(worker.stdout) 1311*4882a593Smuzhiyun workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs) 1312*4882a593Smuzhiyun 1313*4882a593Smuzhiyun workerdata = { 1314*4882a593Smuzhiyun "taskdeps" : self.rqdata.dataCaches[mc].task_deps, 1315*4882a593Smuzhiyun "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv, 1316*4882a593Smuzhiyun "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs, 1317*4882a593Smuzhiyun "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv, 1318*4882a593Smuzhiyun "sigdata" : bb.parse.siggen.get_taskdata(), 1319*4882a593Smuzhiyun "logdefaultlevel" : bb.msg.loggerDefaultLogLevel, 1320*4882a593Smuzhiyun "build_verbose_shell" : self.cooker.configuration.build_verbose_shell, 1321*4882a593Smuzhiyun "build_verbose_stdout" : self.cooker.configuration.build_verbose_stdout, 1322*4882a593Smuzhiyun "logdefaultdomain" : bb.msg.loggerDefaultDomains, 1323*4882a593Smuzhiyun "prhost" : self.cooker.prhost, 1324*4882a593Smuzhiyun "buildname" : self.cfgData.getVar("BUILDNAME"), 1325*4882a593Smuzhiyun "date" : self.cfgData.getVar("DATE"), 1326*4882a593Smuzhiyun "time" : self.cfgData.getVar("TIME"), 1327*4882a593Smuzhiyun "hashservaddr" : self.cooker.hashservaddr, 1328*4882a593Smuzhiyun "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"), 1329*4882a593Smuzhiyun } 1330*4882a593Smuzhiyun 1331*4882a593Smuzhiyun worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>") 1332*4882a593Smuzhiyun worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>") 1333*4882a593Smuzhiyun worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>") 1334*4882a593Smuzhiyun worker.stdin.flush() 1335*4882a593Smuzhiyun 1336*4882a593Smuzhiyun return RunQueueWorker(worker, workerpipe) 1337*4882a593Smuzhiyun 1338*4882a593Smuzhiyun def _teardown_worker(self, worker): 1339*4882a593Smuzhiyun if not worker: 1340*4882a593Smuzhiyun return 1341*4882a593Smuzhiyun logger.debug("Teardown for bitbake-worker") 1342*4882a593Smuzhiyun try: 1343*4882a593Smuzhiyun worker.process.stdin.write(b"<quit></quit>") 1344*4882a593Smuzhiyun worker.process.stdin.flush() 1345*4882a593Smuzhiyun worker.process.stdin.close() 1346*4882a593Smuzhiyun except IOError: 1347*4882a593Smuzhiyun pass 1348*4882a593Smuzhiyun while worker.process.returncode is None: 1349*4882a593Smuzhiyun worker.pipe.read() 1350*4882a593Smuzhiyun worker.process.poll() 1351*4882a593Smuzhiyun while worker.pipe.read(): 1352*4882a593Smuzhiyun continue 1353*4882a593Smuzhiyun worker.pipe.close() 1354*4882a593Smuzhiyun 1355*4882a593Smuzhiyun def start_worker(self): 1356*4882a593Smuzhiyun if self.worker: 1357*4882a593Smuzhiyun self.teardown_workers() 1358*4882a593Smuzhiyun self.teardown = False 1359*4882a593Smuzhiyun for mc in self.rqdata.dataCaches: 1360*4882a593Smuzhiyun self.worker[mc] = self._start_worker(mc) 1361*4882a593Smuzhiyun 1362*4882a593Smuzhiyun def start_fakeworker(self, rqexec, mc): 1363*4882a593Smuzhiyun if not mc in self.fakeworker: 1364*4882a593Smuzhiyun self.fakeworker[mc] = self._start_worker(mc, True, rqexec) 1365*4882a593Smuzhiyun 1366*4882a593Smuzhiyun def teardown_workers(self): 1367*4882a593Smuzhiyun self.teardown = True 1368*4882a593Smuzhiyun for mc in self.worker: 1369*4882a593Smuzhiyun self._teardown_worker(self.worker[mc]) 1370*4882a593Smuzhiyun self.worker = {} 1371*4882a593Smuzhiyun for mc in self.fakeworker: 1372*4882a593Smuzhiyun self._teardown_worker(self.fakeworker[mc]) 1373*4882a593Smuzhiyun self.fakeworker = {} 1374*4882a593Smuzhiyun 1375*4882a593Smuzhiyun def read_workers(self): 1376*4882a593Smuzhiyun for mc in self.worker: 1377*4882a593Smuzhiyun self.worker[mc].pipe.read() 1378*4882a593Smuzhiyun for mc in self.fakeworker: 1379*4882a593Smuzhiyun self.fakeworker[mc].pipe.read() 1380*4882a593Smuzhiyun 1381*4882a593Smuzhiyun def active_fds(self): 1382*4882a593Smuzhiyun fds = [] 1383*4882a593Smuzhiyun for mc in self.worker: 1384*4882a593Smuzhiyun fds.append(self.worker[mc].pipe.input) 1385*4882a593Smuzhiyun for mc in self.fakeworker: 1386*4882a593Smuzhiyun fds.append(self.fakeworker[mc].pipe.input) 1387*4882a593Smuzhiyun return fds 1388*4882a593Smuzhiyun 1389*4882a593Smuzhiyun def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None): 1390*4882a593Smuzhiyun def get_timestamp(f): 1391*4882a593Smuzhiyun try: 1392*4882a593Smuzhiyun if not os.access(f, os.F_OK): 1393*4882a593Smuzhiyun return None 1394*4882a593Smuzhiyun return os.stat(f)[stat.ST_MTIME] 1395*4882a593Smuzhiyun except: 1396*4882a593Smuzhiyun return None 1397*4882a593Smuzhiyun 1398*4882a593Smuzhiyun (mc, fn, tn, taskfn) = split_tid_mcfn(tid) 1399*4882a593Smuzhiyun if taskname is None: 1400*4882a593Smuzhiyun taskname = tn 1401*4882a593Smuzhiyun 1402*4882a593Smuzhiyun stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) 1403*4882a593Smuzhiyun 1404*4882a593Smuzhiyun # If the stamp is missing, it's not current 1405*4882a593Smuzhiyun if not os.access(stampfile, os.F_OK): 1406*4882a593Smuzhiyun logger.debug2("Stampfile %s not available", stampfile) 1407*4882a593Smuzhiyun return False 1408*4882a593Smuzhiyun # If it's a 'nostamp' task, it's not current 1409*4882a593Smuzhiyun taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] 1410*4882a593Smuzhiyun if 'nostamp' in taskdep and taskname in taskdep['nostamp']: 1411*4882a593Smuzhiyun logger.debug2("%s.%s is nostamp\n", fn, taskname) 1412*4882a593Smuzhiyun return False 1413*4882a593Smuzhiyun 1414*4882a593Smuzhiyun if taskname != "do_setscene" and taskname.endswith("_setscene"): 1415*4882a593Smuzhiyun return True 1416*4882a593Smuzhiyun 1417*4882a593Smuzhiyun if cache is None: 1418*4882a593Smuzhiyun cache = {} 1419*4882a593Smuzhiyun 1420*4882a593Smuzhiyun iscurrent = True 1421*4882a593Smuzhiyun t1 = get_timestamp(stampfile) 1422*4882a593Smuzhiyun for dep in self.rqdata.runtaskentries[tid].depends: 1423*4882a593Smuzhiyun if iscurrent: 1424*4882a593Smuzhiyun (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep) 1425*4882a593Smuzhiyun stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2) 1426*4882a593Smuzhiyun stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2) 1427*4882a593Smuzhiyun t2 = get_timestamp(stampfile2) 1428*4882a593Smuzhiyun t3 = get_timestamp(stampfile3) 1429*4882a593Smuzhiyun if t3 and not t2: 1430*4882a593Smuzhiyun continue 1431*4882a593Smuzhiyun if t3 and t3 > t2: 1432*4882a593Smuzhiyun continue 1433*4882a593Smuzhiyun if fn == fn2: 1434*4882a593Smuzhiyun if not t2: 1435*4882a593Smuzhiyun logger.debug2('Stampfile %s does not exist', stampfile2) 1436*4882a593Smuzhiyun iscurrent = False 1437*4882a593Smuzhiyun break 1438*4882a593Smuzhiyun if t1 < t2: 1439*4882a593Smuzhiyun logger.debug2('Stampfile %s < %s', stampfile, stampfile2) 1440*4882a593Smuzhiyun iscurrent = False 1441*4882a593Smuzhiyun break 1442*4882a593Smuzhiyun if recurse and iscurrent: 1443*4882a593Smuzhiyun if dep in cache: 1444*4882a593Smuzhiyun iscurrent = cache[dep] 1445*4882a593Smuzhiyun if not iscurrent: 1446*4882a593Smuzhiyun logger.debug2('Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2)) 1447*4882a593Smuzhiyun else: 1448*4882a593Smuzhiyun iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache) 1449*4882a593Smuzhiyun cache[dep] = iscurrent 1450*4882a593Smuzhiyun if recurse: 1451*4882a593Smuzhiyun cache[tid] = iscurrent 1452*4882a593Smuzhiyun return iscurrent 1453*4882a593Smuzhiyun 1454*4882a593Smuzhiyun def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True): 1455*4882a593Smuzhiyun valid = set() 1456*4882a593Smuzhiyun if self.hashvalidate: 1457*4882a593Smuzhiyun sq_data = {} 1458*4882a593Smuzhiyun sq_data['hash'] = {} 1459*4882a593Smuzhiyun sq_data['hashfn'] = {} 1460*4882a593Smuzhiyun sq_data['unihash'] = {} 1461*4882a593Smuzhiyun for tid in tocheck: 1462*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 1463*4882a593Smuzhiyun sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash 1464*4882a593Smuzhiyun sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn] 1465*4882a593Smuzhiyun sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash 1466*4882a593Smuzhiyun 1467*4882a593Smuzhiyun valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary) 1468*4882a593Smuzhiyun 1469*4882a593Smuzhiyun return valid 1470*4882a593Smuzhiyun 1471*4882a593Smuzhiyun def validate_hash(self, sq_data, d, siginfo, currentcount, summary): 1472*4882a593Smuzhiyun locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount, "summary" : summary} 1473*4882a593Smuzhiyun 1474*4882a593Smuzhiyun # Metadata has **kwargs so args can be added, sq_data can also gain new fields 1475*4882a593Smuzhiyun call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summary)" 1476*4882a593Smuzhiyun 1477*4882a593Smuzhiyun return bb.utils.better_eval(call, locs) 1478*4882a593Smuzhiyun 1479*4882a593Smuzhiyun def _execute_runqueue(self): 1480*4882a593Smuzhiyun """ 1481*4882a593Smuzhiyun Run the tasks in a queue prepared by rqdata.prepare() 1482*4882a593Smuzhiyun Upon failure, optionally try to recover the build using any alternate providers 1483*4882a593Smuzhiyun (if the halt on failure configuration option isn't set) 1484*4882a593Smuzhiyun """ 1485*4882a593Smuzhiyun 1486*4882a593Smuzhiyun retval = True 1487*4882a593Smuzhiyun 1488*4882a593Smuzhiyun if self.state is runQueuePrepare: 1489*4882a593Smuzhiyun # NOTE: if you add, remove or significantly refactor the stages of this 1490*4882a593Smuzhiyun # process then you should recalculate the weightings here. This is quite 1491*4882a593Smuzhiyun # easy to do - just change the next line temporarily to pass debug=True as 1492*4882a593Smuzhiyun # the last parameter and you'll get a printout of the weightings as well 1493*4882a593Smuzhiyun # as a map to the lines where next_stage() was called. Of course this isn't 1494*4882a593Smuzhiyun # critical, but it helps to keep the progress reporting accurate. 1495*4882a593Smuzhiyun self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data, 1496*4882a593Smuzhiyun "Initialising tasks", 1497*4882a593Smuzhiyun [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244]) 1498*4882a593Smuzhiyun if self.rqdata.prepare() == 0: 1499*4882a593Smuzhiyun self.state = runQueueComplete 1500*4882a593Smuzhiyun else: 1501*4882a593Smuzhiyun self.state = runQueueSceneInit 1502*4882a593Smuzhiyun bb.parse.siggen.save_unitaskhashes() 1503*4882a593Smuzhiyun 1504*4882a593Smuzhiyun if self.state is runQueueSceneInit: 1505*4882a593Smuzhiyun self.rqdata.init_progress_reporter.next_stage() 1506*4882a593Smuzhiyun 1507*4882a593Smuzhiyun # we are ready to run, emit dependency info to any UI or class which 1508*4882a593Smuzhiyun # needs it 1509*4882a593Smuzhiyun depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData) 1510*4882a593Smuzhiyun self.rqdata.init_progress_reporter.next_stage() 1511*4882a593Smuzhiyun bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data) 1512*4882a593Smuzhiyun 1513*4882a593Smuzhiyun if not self.dm_event_handler_registered: 1514*4882a593Smuzhiyun res = bb.event.register(self.dm_event_handler_name, 1515*4882a593Smuzhiyun lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False, 1516*4882a593Smuzhiyun ('bb.event.HeartbeatEvent',), data=self.cfgData) 1517*4882a593Smuzhiyun self.dm_event_handler_registered = True 1518*4882a593Smuzhiyun 1519*4882a593Smuzhiyun dump = self.cooker.configuration.dump_signatures 1520*4882a593Smuzhiyun if dump: 1521*4882a593Smuzhiyun self.rqdata.init_progress_reporter.finish() 1522*4882a593Smuzhiyun if 'printdiff' in dump: 1523*4882a593Smuzhiyun invalidtasks = self.print_diffscenetasks() 1524*4882a593Smuzhiyun self.dump_signatures(dump) 1525*4882a593Smuzhiyun if 'printdiff' in dump: 1526*4882a593Smuzhiyun self.write_diffscenetasks(invalidtasks) 1527*4882a593Smuzhiyun self.state = runQueueComplete 1528*4882a593Smuzhiyun 1529*4882a593Smuzhiyun if self.state is runQueueSceneInit: 1530*4882a593Smuzhiyun self.rqdata.init_progress_reporter.next_stage() 1531*4882a593Smuzhiyun self.start_worker() 1532*4882a593Smuzhiyun self.rqdata.init_progress_reporter.next_stage() 1533*4882a593Smuzhiyun self.rqexe = RunQueueExecute(self) 1534*4882a593Smuzhiyun 1535*4882a593Smuzhiyun # If we don't have any setscene functions, skip execution 1536*4882a593Smuzhiyun if not self.rqdata.runq_setscene_tids: 1537*4882a593Smuzhiyun logger.info('No setscene tasks') 1538*4882a593Smuzhiyun for tid in self.rqdata.runtaskentries: 1539*4882a593Smuzhiyun if not self.rqdata.runtaskentries[tid].depends: 1540*4882a593Smuzhiyun self.rqexe.setbuildable(tid) 1541*4882a593Smuzhiyun self.rqexe.tasks_notcovered.add(tid) 1542*4882a593Smuzhiyun self.rqexe.sqdone = True 1543*4882a593Smuzhiyun logger.info('Executing Tasks') 1544*4882a593Smuzhiyun self.state = runQueueRunning 1545*4882a593Smuzhiyun 1546*4882a593Smuzhiyun if self.state is runQueueRunning: 1547*4882a593Smuzhiyun retval = self.rqexe.execute() 1548*4882a593Smuzhiyun 1549*4882a593Smuzhiyun if self.state is runQueueCleanUp: 1550*4882a593Smuzhiyun retval = self.rqexe.finish() 1551*4882a593Smuzhiyun 1552*4882a593Smuzhiyun build_done = self.state is runQueueComplete or self.state is runQueueFailed 1553*4882a593Smuzhiyun 1554*4882a593Smuzhiyun if build_done and self.dm_event_handler_registered: 1555*4882a593Smuzhiyun bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData) 1556*4882a593Smuzhiyun self.dm_event_handler_registered = False 1557*4882a593Smuzhiyun 1558*4882a593Smuzhiyun if build_done and self.rqexe: 1559*4882a593Smuzhiyun bb.parse.siggen.save_unitaskhashes() 1560*4882a593Smuzhiyun self.teardown_workers() 1561*4882a593Smuzhiyun if self.rqexe: 1562*4882a593Smuzhiyun if self.rqexe.stats.failed: 1563*4882a593Smuzhiyun logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed) 1564*4882a593Smuzhiyun else: 1565*4882a593Smuzhiyun # Let's avoid the word "failed" if nothing actually did 1566*4882a593Smuzhiyun logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped) 1567*4882a593Smuzhiyun 1568*4882a593Smuzhiyun if self.state is runQueueFailed: 1569*4882a593Smuzhiyun raise bb.runqueue.TaskFailure(self.rqexe.failed_tids) 1570*4882a593Smuzhiyun 1571*4882a593Smuzhiyun if self.state is runQueueComplete: 1572*4882a593Smuzhiyun # All done 1573*4882a593Smuzhiyun return False 1574*4882a593Smuzhiyun 1575*4882a593Smuzhiyun # Loop 1576*4882a593Smuzhiyun return retval 1577*4882a593Smuzhiyun 1578*4882a593Smuzhiyun def execute_runqueue(self): 1579*4882a593Smuzhiyun # Catch unexpected exceptions and ensure we exit when an error occurs, not loop. 1580*4882a593Smuzhiyun try: 1581*4882a593Smuzhiyun return self._execute_runqueue() 1582*4882a593Smuzhiyun except bb.runqueue.TaskFailure: 1583*4882a593Smuzhiyun raise 1584*4882a593Smuzhiyun except SystemExit: 1585*4882a593Smuzhiyun raise 1586*4882a593Smuzhiyun except bb.BBHandledException: 1587*4882a593Smuzhiyun try: 1588*4882a593Smuzhiyun self.teardown_workers() 1589*4882a593Smuzhiyun except: 1590*4882a593Smuzhiyun pass 1591*4882a593Smuzhiyun self.state = runQueueComplete 1592*4882a593Smuzhiyun raise 1593*4882a593Smuzhiyun except Exception as err: 1594*4882a593Smuzhiyun logger.exception("An uncaught exception occurred in runqueue") 1595*4882a593Smuzhiyun try: 1596*4882a593Smuzhiyun self.teardown_workers() 1597*4882a593Smuzhiyun except: 1598*4882a593Smuzhiyun pass 1599*4882a593Smuzhiyun self.state = runQueueComplete 1600*4882a593Smuzhiyun raise 1601*4882a593Smuzhiyun 1602*4882a593Smuzhiyun def finish_runqueue(self, now = False): 1603*4882a593Smuzhiyun if not self.rqexe: 1604*4882a593Smuzhiyun self.state = runQueueComplete 1605*4882a593Smuzhiyun return 1606*4882a593Smuzhiyun 1607*4882a593Smuzhiyun if now: 1608*4882a593Smuzhiyun self.rqexe.finish_now() 1609*4882a593Smuzhiyun else: 1610*4882a593Smuzhiyun self.rqexe.finish() 1611*4882a593Smuzhiyun 1612*4882a593Smuzhiyun def rq_dump_sigfn(self, fn, options): 1613*4882a593Smuzhiyun bb_cache = bb.cache.NoCache(self.cooker.databuilder) 1614*4882a593Smuzhiyun mc = bb.runqueue.mc_from_tid(fn) 1615*4882a593Smuzhiyun the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn)) 1616*4882a593Smuzhiyun siggen = bb.parse.siggen 1617*4882a593Smuzhiyun dataCaches = self.rqdata.dataCaches 1618*4882a593Smuzhiyun siggen.dump_sigfn(fn, dataCaches, options) 1619*4882a593Smuzhiyun 1620*4882a593Smuzhiyun def dump_signatures(self, options): 1621*4882a593Smuzhiyun fns = set() 1622*4882a593Smuzhiyun bb.note("Reparsing files to collect dependency data") 1623*4882a593Smuzhiyun 1624*4882a593Smuzhiyun for tid in self.rqdata.runtaskentries: 1625*4882a593Smuzhiyun fn = fn_from_tid(tid) 1626*4882a593Smuzhiyun fns.add(fn) 1627*4882a593Smuzhiyun 1628*4882a593Smuzhiyun max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1) 1629*4882a593Smuzhiyun # We cannot use the real multiprocessing.Pool easily due to some local data 1630*4882a593Smuzhiyun # that can't be pickled. This is a cheap multi-process solution. 1631*4882a593Smuzhiyun launched = [] 1632*4882a593Smuzhiyun while fns: 1633*4882a593Smuzhiyun if len(launched) < max_process: 1634*4882a593Smuzhiyun p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options)) 1635*4882a593Smuzhiyun p.start() 1636*4882a593Smuzhiyun launched.append(p) 1637*4882a593Smuzhiyun for q in launched: 1638*4882a593Smuzhiyun # The finished processes are joined when calling is_alive() 1639*4882a593Smuzhiyun if not q.is_alive(): 1640*4882a593Smuzhiyun launched.remove(q) 1641*4882a593Smuzhiyun for p in launched: 1642*4882a593Smuzhiyun p.join() 1643*4882a593Smuzhiyun 1644*4882a593Smuzhiyun bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options) 1645*4882a593Smuzhiyun 1646*4882a593Smuzhiyun return 1647*4882a593Smuzhiyun 1648*4882a593Smuzhiyun def print_diffscenetasks(self): 1649*4882a593Smuzhiyun 1650*4882a593Smuzhiyun noexec = [] 1651*4882a593Smuzhiyun tocheck = set() 1652*4882a593Smuzhiyun 1653*4882a593Smuzhiyun for tid in self.rqdata.runtaskentries: 1654*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 1655*4882a593Smuzhiyun taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] 1656*4882a593Smuzhiyun 1657*4882a593Smuzhiyun if 'noexec' in taskdep and taskname in taskdep['noexec']: 1658*4882a593Smuzhiyun noexec.append(tid) 1659*4882a593Smuzhiyun continue 1660*4882a593Smuzhiyun 1661*4882a593Smuzhiyun tocheck.add(tid) 1662*4882a593Smuzhiyun 1663*4882a593Smuzhiyun valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False) 1664*4882a593Smuzhiyun 1665*4882a593Smuzhiyun # Tasks which are both setscene and noexec never care about dependencies 1666*4882a593Smuzhiyun # We therefore find tasks which are setscene and noexec and mark their 1667*4882a593Smuzhiyun # unique dependencies as valid. 1668*4882a593Smuzhiyun for tid in noexec: 1669*4882a593Smuzhiyun if tid not in self.rqdata.runq_setscene_tids: 1670*4882a593Smuzhiyun continue 1671*4882a593Smuzhiyun for dep in self.rqdata.runtaskentries[tid].depends: 1672*4882a593Smuzhiyun hasnoexecparents = True 1673*4882a593Smuzhiyun for dep2 in self.rqdata.runtaskentries[dep].revdeps: 1674*4882a593Smuzhiyun if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec: 1675*4882a593Smuzhiyun continue 1676*4882a593Smuzhiyun hasnoexecparents = False 1677*4882a593Smuzhiyun break 1678*4882a593Smuzhiyun if hasnoexecparents: 1679*4882a593Smuzhiyun valid_new.add(dep) 1680*4882a593Smuzhiyun 1681*4882a593Smuzhiyun invalidtasks = set() 1682*4882a593Smuzhiyun for tid in self.rqdata.runtaskentries: 1683*4882a593Smuzhiyun if tid not in valid_new and tid not in noexec: 1684*4882a593Smuzhiyun invalidtasks.add(tid) 1685*4882a593Smuzhiyun 1686*4882a593Smuzhiyun found = set() 1687*4882a593Smuzhiyun processed = set() 1688*4882a593Smuzhiyun for tid in invalidtasks: 1689*4882a593Smuzhiyun toprocess = set([tid]) 1690*4882a593Smuzhiyun while toprocess: 1691*4882a593Smuzhiyun next = set() 1692*4882a593Smuzhiyun for t in toprocess: 1693*4882a593Smuzhiyun for dep in self.rqdata.runtaskentries[t].depends: 1694*4882a593Smuzhiyun if dep in invalidtasks: 1695*4882a593Smuzhiyun found.add(tid) 1696*4882a593Smuzhiyun if dep not in processed: 1697*4882a593Smuzhiyun processed.add(dep) 1698*4882a593Smuzhiyun next.add(dep) 1699*4882a593Smuzhiyun toprocess = next 1700*4882a593Smuzhiyun if tid in found: 1701*4882a593Smuzhiyun toprocess = set() 1702*4882a593Smuzhiyun 1703*4882a593Smuzhiyun tasklist = [] 1704*4882a593Smuzhiyun for tid in invalidtasks.difference(found): 1705*4882a593Smuzhiyun tasklist.append(tid) 1706*4882a593Smuzhiyun 1707*4882a593Smuzhiyun if tasklist: 1708*4882a593Smuzhiyun bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist)) 1709*4882a593Smuzhiyun 1710*4882a593Smuzhiyun return invalidtasks.difference(found) 1711*4882a593Smuzhiyun 1712*4882a593Smuzhiyun def write_diffscenetasks(self, invalidtasks): 1713*4882a593Smuzhiyun 1714*4882a593Smuzhiyun # Define recursion callback 1715*4882a593Smuzhiyun def recursecb(key, hash1, hash2): 1716*4882a593Smuzhiyun hashes = [hash1, hash2] 1717*4882a593Smuzhiyun hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData) 1718*4882a593Smuzhiyun 1719*4882a593Smuzhiyun recout = [] 1720*4882a593Smuzhiyun if len(hashfiles) == 2: 1721*4882a593Smuzhiyun out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb) 1722*4882a593Smuzhiyun recout.extend(list(' ' + l for l in out2)) 1723*4882a593Smuzhiyun else: 1724*4882a593Smuzhiyun recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2)) 1725*4882a593Smuzhiyun 1726*4882a593Smuzhiyun return recout 1727*4882a593Smuzhiyun 1728*4882a593Smuzhiyun 1729*4882a593Smuzhiyun for tid in invalidtasks: 1730*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 1731*4882a593Smuzhiyun pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] 1732*4882a593Smuzhiyun h = self.rqdata.runtaskentries[tid].hash 1733*4882a593Smuzhiyun matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc]) 1734*4882a593Smuzhiyun match = None 1735*4882a593Smuzhiyun for m in matches: 1736*4882a593Smuzhiyun if h in m: 1737*4882a593Smuzhiyun match = m 1738*4882a593Smuzhiyun if match is None: 1739*4882a593Smuzhiyun bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h) 1740*4882a593Smuzhiyun matches = {k : v for k, v in iter(matches.items()) if h not in k} 1741*4882a593Smuzhiyun if matches: 1742*4882a593Smuzhiyun latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1] 1743*4882a593Smuzhiyun prevh = __find_sha256__.search(latestmatch).group(0) 1744*4882a593Smuzhiyun output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb) 1745*4882a593Smuzhiyun bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output)) 1746*4882a593Smuzhiyun 1747*4882a593Smuzhiyun 1748*4882a593Smuzhiyunclass RunQueueExecute: 1749*4882a593Smuzhiyun 1750*4882a593Smuzhiyun def __init__(self, rq): 1751*4882a593Smuzhiyun self.rq = rq 1752*4882a593Smuzhiyun self.cooker = rq.cooker 1753*4882a593Smuzhiyun self.cfgData = rq.cfgData 1754*4882a593Smuzhiyun self.rqdata = rq.rqdata 1755*4882a593Smuzhiyun 1756*4882a593Smuzhiyun self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1) 1757*4882a593Smuzhiyun self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed" 1758*4882a593Smuzhiyun self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU") 1759*4882a593Smuzhiyun self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO") 1760*4882a593Smuzhiyun self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY") 1761*4882a593Smuzhiyun 1762*4882a593Smuzhiyun self.sq_buildable = set() 1763*4882a593Smuzhiyun self.sq_running = set() 1764*4882a593Smuzhiyun self.sq_live = set() 1765*4882a593Smuzhiyun 1766*4882a593Smuzhiyun self.updated_taskhash_queue = [] 1767*4882a593Smuzhiyun self.pending_migrations = set() 1768*4882a593Smuzhiyun 1769*4882a593Smuzhiyun self.runq_buildable = set() 1770*4882a593Smuzhiyun self.runq_running = set() 1771*4882a593Smuzhiyun self.runq_complete = set() 1772*4882a593Smuzhiyun self.runq_tasksrun = set() 1773*4882a593Smuzhiyun 1774*4882a593Smuzhiyun self.build_stamps = {} 1775*4882a593Smuzhiyun self.build_stamps2 = [] 1776*4882a593Smuzhiyun self.failed_tids = [] 1777*4882a593Smuzhiyun self.sq_deferred = {} 1778*4882a593Smuzhiyun 1779*4882a593Smuzhiyun self.stampcache = {} 1780*4882a593Smuzhiyun 1781*4882a593Smuzhiyun self.holdoff_tasks = set() 1782*4882a593Smuzhiyun self.holdoff_need_update = True 1783*4882a593Smuzhiyun self.sqdone = False 1784*4882a593Smuzhiyun 1785*4882a593Smuzhiyun self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids)) 1786*4882a593Smuzhiyun 1787*4882a593Smuzhiyun for mc in rq.worker: 1788*4882a593Smuzhiyun rq.worker[mc].pipe.setrunqueueexec(self) 1789*4882a593Smuzhiyun for mc in rq.fakeworker: 1790*4882a593Smuzhiyun rq.fakeworker[mc].pipe.setrunqueueexec(self) 1791*4882a593Smuzhiyun 1792*4882a593Smuzhiyun if self.number_tasks <= 0: 1793*4882a593Smuzhiyun bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks) 1794*4882a593Smuzhiyun 1795*4882a593Smuzhiyun lower_limit = 1.0 1796*4882a593Smuzhiyun upper_limit = 1000000.0 1797*4882a593Smuzhiyun if self.max_cpu_pressure: 1798*4882a593Smuzhiyun self.max_cpu_pressure = float(self.max_cpu_pressure) 1799*4882a593Smuzhiyun if self.max_cpu_pressure < lower_limit: 1800*4882a593Smuzhiyun bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit)) 1801*4882a593Smuzhiyun if self.max_cpu_pressure > upper_limit: 1802*4882a593Smuzhiyun bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure)) 1803*4882a593Smuzhiyun 1804*4882a593Smuzhiyun if self.max_io_pressure: 1805*4882a593Smuzhiyun self.max_io_pressure = float(self.max_io_pressure) 1806*4882a593Smuzhiyun if self.max_io_pressure < lower_limit: 1807*4882a593Smuzhiyun bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit)) 1808*4882a593Smuzhiyun if self.max_io_pressure > upper_limit: 1809*4882a593Smuzhiyun bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure)) 1810*4882a593Smuzhiyun 1811*4882a593Smuzhiyun if self.max_memory_pressure: 1812*4882a593Smuzhiyun self.max_memory_pressure = float(self.max_memory_pressure) 1813*4882a593Smuzhiyun if self.max_memory_pressure < lower_limit: 1814*4882a593Smuzhiyun bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit)) 1815*4882a593Smuzhiyun if self.max_memory_pressure > upper_limit: 1816*4882a593Smuzhiyun bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure)) 1817*4882a593Smuzhiyun 1818*4882a593Smuzhiyun # List of setscene tasks which we've covered 1819*4882a593Smuzhiyun self.scenequeue_covered = set() 1820*4882a593Smuzhiyun # List of tasks which are covered (including setscene ones) 1821*4882a593Smuzhiyun self.tasks_covered = set() 1822*4882a593Smuzhiyun self.tasks_scenequeue_done = set() 1823*4882a593Smuzhiyun self.scenequeue_notcovered = set() 1824*4882a593Smuzhiyun self.tasks_notcovered = set() 1825*4882a593Smuzhiyun self.scenequeue_notneeded = set() 1826*4882a593Smuzhiyun 1827*4882a593Smuzhiyun # We can't skip specified target tasks which aren't setscene tasks 1828*4882a593Smuzhiyun self.cantskip = set(self.rqdata.target_tids) 1829*4882a593Smuzhiyun self.cantskip.difference_update(self.rqdata.runq_setscene_tids) 1830*4882a593Smuzhiyun self.cantskip.intersection_update(self.rqdata.runtaskentries) 1831*4882a593Smuzhiyun 1832*4882a593Smuzhiyun schedulers = self.get_schedulers() 1833*4882a593Smuzhiyun for scheduler in schedulers: 1834*4882a593Smuzhiyun if self.scheduler == scheduler.name: 1835*4882a593Smuzhiyun self.sched = scheduler(self, self.rqdata) 1836*4882a593Smuzhiyun logger.debug("Using runqueue scheduler '%s'", scheduler.name) 1837*4882a593Smuzhiyun break 1838*4882a593Smuzhiyun else: 1839*4882a593Smuzhiyun bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" % 1840*4882a593Smuzhiyun (self.scheduler, ", ".join(obj.name for obj in schedulers))) 1841*4882a593Smuzhiyun 1842*4882a593Smuzhiyun #if self.rqdata.runq_setscene_tids: 1843*4882a593Smuzhiyun self.sqdata = SQData() 1844*4882a593Smuzhiyun build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self) 1845*4882a593Smuzhiyun 1846*4882a593Smuzhiyun def runqueue_process_waitpid(self, task, status, fakerootlog=None): 1847*4882a593Smuzhiyun 1848*4882a593Smuzhiyun # self.build_stamps[pid] may not exist when use shared work directory. 1849*4882a593Smuzhiyun if task in self.build_stamps: 1850*4882a593Smuzhiyun self.build_stamps2.remove(self.build_stamps[task]) 1851*4882a593Smuzhiyun del self.build_stamps[task] 1852*4882a593Smuzhiyun 1853*4882a593Smuzhiyun if task in self.sq_live: 1854*4882a593Smuzhiyun if status != 0: 1855*4882a593Smuzhiyun self.sq_task_fail(task, status) 1856*4882a593Smuzhiyun else: 1857*4882a593Smuzhiyun self.sq_task_complete(task) 1858*4882a593Smuzhiyun self.sq_live.remove(task) 1859*4882a593Smuzhiyun self.stats.updateActiveSetscene(len(self.sq_live)) 1860*4882a593Smuzhiyun else: 1861*4882a593Smuzhiyun if status != 0: 1862*4882a593Smuzhiyun self.task_fail(task, status, fakerootlog=fakerootlog) 1863*4882a593Smuzhiyun else: 1864*4882a593Smuzhiyun self.task_complete(task) 1865*4882a593Smuzhiyun return True 1866*4882a593Smuzhiyun 1867*4882a593Smuzhiyun def finish_now(self): 1868*4882a593Smuzhiyun for mc in self.rq.worker: 1869*4882a593Smuzhiyun try: 1870*4882a593Smuzhiyun self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>") 1871*4882a593Smuzhiyun self.rq.worker[mc].process.stdin.flush() 1872*4882a593Smuzhiyun except IOError: 1873*4882a593Smuzhiyun # worker must have died? 1874*4882a593Smuzhiyun pass 1875*4882a593Smuzhiyun for mc in self.rq.fakeworker: 1876*4882a593Smuzhiyun try: 1877*4882a593Smuzhiyun self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>") 1878*4882a593Smuzhiyun self.rq.fakeworker[mc].process.stdin.flush() 1879*4882a593Smuzhiyun except IOError: 1880*4882a593Smuzhiyun # worker must have died? 1881*4882a593Smuzhiyun pass 1882*4882a593Smuzhiyun 1883*4882a593Smuzhiyun if self.failed_tids: 1884*4882a593Smuzhiyun self.rq.state = runQueueFailed 1885*4882a593Smuzhiyun return 1886*4882a593Smuzhiyun 1887*4882a593Smuzhiyun self.rq.state = runQueueComplete 1888*4882a593Smuzhiyun return 1889*4882a593Smuzhiyun 1890*4882a593Smuzhiyun def finish(self): 1891*4882a593Smuzhiyun self.rq.state = runQueueCleanUp 1892*4882a593Smuzhiyun 1893*4882a593Smuzhiyun active = self.stats.active + len(self.sq_live) 1894*4882a593Smuzhiyun if active > 0: 1895*4882a593Smuzhiyun bb.event.fire(runQueueExitWait(active), self.cfgData) 1896*4882a593Smuzhiyun self.rq.read_workers() 1897*4882a593Smuzhiyun return self.rq.active_fds() 1898*4882a593Smuzhiyun 1899*4882a593Smuzhiyun if self.failed_tids: 1900*4882a593Smuzhiyun self.rq.state = runQueueFailed 1901*4882a593Smuzhiyun return True 1902*4882a593Smuzhiyun 1903*4882a593Smuzhiyun self.rq.state = runQueueComplete 1904*4882a593Smuzhiyun return True 1905*4882a593Smuzhiyun 1906*4882a593Smuzhiyun # Used by setscene only 1907*4882a593Smuzhiyun def check_dependencies(self, task, taskdeps): 1908*4882a593Smuzhiyun if not self.rq.depvalidate: 1909*4882a593Smuzhiyun return False 1910*4882a593Smuzhiyun 1911*4882a593Smuzhiyun # Must not edit parent data 1912*4882a593Smuzhiyun taskdeps = set(taskdeps) 1913*4882a593Smuzhiyun 1914*4882a593Smuzhiyun taskdata = {} 1915*4882a593Smuzhiyun taskdeps.add(task) 1916*4882a593Smuzhiyun for dep in taskdeps: 1917*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(dep) 1918*4882a593Smuzhiyun pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] 1919*4882a593Smuzhiyun taskdata[dep] = [pn, taskname, fn] 1920*4882a593Smuzhiyun call = self.rq.depvalidate + "(task, taskdata, notneeded, d)" 1921*4882a593Smuzhiyun locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data } 1922*4882a593Smuzhiyun valid = bb.utils.better_eval(call, locs) 1923*4882a593Smuzhiyun return valid 1924*4882a593Smuzhiyun 1925*4882a593Smuzhiyun def can_start_task(self): 1926*4882a593Smuzhiyun active = self.stats.active + len(self.sq_live) 1927*4882a593Smuzhiyun can_start = active < self.number_tasks 1928*4882a593Smuzhiyun return can_start 1929*4882a593Smuzhiyun 1930*4882a593Smuzhiyun def get_schedulers(self): 1931*4882a593Smuzhiyun schedulers = set(obj for obj in globals().values() 1932*4882a593Smuzhiyun if type(obj) is type and 1933*4882a593Smuzhiyun issubclass(obj, RunQueueScheduler)) 1934*4882a593Smuzhiyun 1935*4882a593Smuzhiyun user_schedulers = self.cfgData.getVar("BB_SCHEDULERS") 1936*4882a593Smuzhiyun if user_schedulers: 1937*4882a593Smuzhiyun for sched in user_schedulers.split(): 1938*4882a593Smuzhiyun if not "." in sched: 1939*4882a593Smuzhiyun bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched) 1940*4882a593Smuzhiyun continue 1941*4882a593Smuzhiyun 1942*4882a593Smuzhiyun modname, name = sched.rsplit(".", 1) 1943*4882a593Smuzhiyun try: 1944*4882a593Smuzhiyun module = __import__(modname, fromlist=(name,)) 1945*4882a593Smuzhiyun except ImportError as exc: 1946*4882a593Smuzhiyun logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc)) 1947*4882a593Smuzhiyun raise SystemExit(1) 1948*4882a593Smuzhiyun else: 1949*4882a593Smuzhiyun schedulers.add(getattr(module, name)) 1950*4882a593Smuzhiyun return schedulers 1951*4882a593Smuzhiyun 1952*4882a593Smuzhiyun def setbuildable(self, task): 1953*4882a593Smuzhiyun self.runq_buildable.add(task) 1954*4882a593Smuzhiyun self.sched.newbuildable(task) 1955*4882a593Smuzhiyun 1956*4882a593Smuzhiyun def task_completeoutright(self, task): 1957*4882a593Smuzhiyun """ 1958*4882a593Smuzhiyun Mark a task as completed 1959*4882a593Smuzhiyun Look at the reverse dependencies and mark any task with 1960*4882a593Smuzhiyun completed dependencies as buildable 1961*4882a593Smuzhiyun """ 1962*4882a593Smuzhiyun self.runq_complete.add(task) 1963*4882a593Smuzhiyun for revdep in self.rqdata.runtaskentries[task].revdeps: 1964*4882a593Smuzhiyun if revdep in self.runq_running: 1965*4882a593Smuzhiyun continue 1966*4882a593Smuzhiyun if revdep in self.runq_buildable: 1967*4882a593Smuzhiyun continue 1968*4882a593Smuzhiyun alldeps = True 1969*4882a593Smuzhiyun for dep in self.rqdata.runtaskentries[revdep].depends: 1970*4882a593Smuzhiyun if dep not in self.runq_complete: 1971*4882a593Smuzhiyun alldeps = False 1972*4882a593Smuzhiyun break 1973*4882a593Smuzhiyun if alldeps: 1974*4882a593Smuzhiyun self.setbuildable(revdep) 1975*4882a593Smuzhiyun logger.debug("Marking task %s as buildable", revdep) 1976*4882a593Smuzhiyun 1977*4882a593Smuzhiyun for t in self.sq_deferred.copy(): 1978*4882a593Smuzhiyun if self.sq_deferred[t] == task: 1979*4882a593Smuzhiyun logger.debug2("Deferred task %s now buildable" % t) 1980*4882a593Smuzhiyun del self.sq_deferred[t] 1981*4882a593Smuzhiyun update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) 1982*4882a593Smuzhiyun 1983*4882a593Smuzhiyun def task_complete(self, task): 1984*4882a593Smuzhiyun self.stats.taskCompleted() 1985*4882a593Smuzhiyun bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) 1986*4882a593Smuzhiyun self.task_completeoutright(task) 1987*4882a593Smuzhiyun self.runq_tasksrun.add(task) 1988*4882a593Smuzhiyun 1989*4882a593Smuzhiyun def task_fail(self, task, exitcode, fakerootlog=None): 1990*4882a593Smuzhiyun """ 1991*4882a593Smuzhiyun Called when a task has failed 1992*4882a593Smuzhiyun Updates the state engine with the failure 1993*4882a593Smuzhiyun """ 1994*4882a593Smuzhiyun self.stats.taskFailed() 1995*4882a593Smuzhiyun self.failed_tids.append(task) 1996*4882a593Smuzhiyun 1997*4882a593Smuzhiyun fakeroot_log = [] 1998*4882a593Smuzhiyun if fakerootlog and os.path.exists(fakerootlog): 1999*4882a593Smuzhiyun with open(fakerootlog) as fakeroot_log_file: 2000*4882a593Smuzhiyun fakeroot_failed = False 2001*4882a593Smuzhiyun for line in reversed(fakeroot_log_file.readlines()): 2002*4882a593Smuzhiyun for fakeroot_error in ['mismatch', 'error', 'fatal']: 2003*4882a593Smuzhiyun if fakeroot_error in line.lower(): 2004*4882a593Smuzhiyun fakeroot_failed = True 2005*4882a593Smuzhiyun if 'doing new pid setup and server start' in line: 2006*4882a593Smuzhiyun break 2007*4882a593Smuzhiyun fakeroot_log.append(line) 2008*4882a593Smuzhiyun 2009*4882a593Smuzhiyun if not fakeroot_failed: 2010*4882a593Smuzhiyun fakeroot_log = [] 2011*4882a593Smuzhiyun 2012*4882a593Smuzhiyun bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_log) or None)), self.cfgData) 2013*4882a593Smuzhiyun 2014*4882a593Smuzhiyun if self.rqdata.taskData[''].halt: 2015*4882a593Smuzhiyun self.rq.state = runQueueCleanUp 2016*4882a593Smuzhiyun 2017*4882a593Smuzhiyun def task_skip(self, task, reason): 2018*4882a593Smuzhiyun self.runq_running.add(task) 2019*4882a593Smuzhiyun self.setbuildable(task) 2020*4882a593Smuzhiyun bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData) 2021*4882a593Smuzhiyun self.task_completeoutright(task) 2022*4882a593Smuzhiyun self.stats.taskSkipped() 2023*4882a593Smuzhiyun self.stats.taskCompleted() 2024*4882a593Smuzhiyun 2025*4882a593Smuzhiyun def summarise_scenequeue_errors(self): 2026*4882a593Smuzhiyun err = False 2027*4882a593Smuzhiyun if not self.sqdone: 2028*4882a593Smuzhiyun logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered))) 2029*4882a593Smuzhiyun completeevent = sceneQueueComplete(self.stats, self.rq) 2030*4882a593Smuzhiyun bb.event.fire(completeevent, self.cfgData) 2031*4882a593Smuzhiyun if self.sq_deferred: 2032*4882a593Smuzhiyun logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred)) 2033*4882a593Smuzhiyun err = True 2034*4882a593Smuzhiyun if self.updated_taskhash_queue: 2035*4882a593Smuzhiyun logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue)) 2036*4882a593Smuzhiyun err = True 2037*4882a593Smuzhiyun if self.holdoff_tasks: 2038*4882a593Smuzhiyun logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks)) 2039*4882a593Smuzhiyun err = True 2040*4882a593Smuzhiyun 2041*4882a593Smuzhiyun for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered): 2042*4882a593Smuzhiyun # No task should end up in both covered and uncovered, that is a bug. 2043*4882a593Smuzhiyun logger.error("Setscene task %s in both covered and notcovered." % tid) 2044*4882a593Smuzhiyun 2045*4882a593Smuzhiyun for tid in self.rqdata.runq_setscene_tids: 2046*4882a593Smuzhiyun if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered: 2047*4882a593Smuzhiyun err = True 2048*4882a593Smuzhiyun logger.error("Setscene Task %s was never marked as covered or not covered" % tid) 2049*4882a593Smuzhiyun if tid not in self.sq_buildable: 2050*4882a593Smuzhiyun err = True 2051*4882a593Smuzhiyun logger.error("Setscene Task %s was never marked as buildable" % tid) 2052*4882a593Smuzhiyun if tid not in self.sq_running: 2053*4882a593Smuzhiyun err = True 2054*4882a593Smuzhiyun logger.error("Setscene Task %s was never marked as running" % tid) 2055*4882a593Smuzhiyun 2056*4882a593Smuzhiyun for x in self.rqdata.runtaskentries: 2057*4882a593Smuzhiyun if x not in self.tasks_covered and x not in self.tasks_notcovered: 2058*4882a593Smuzhiyun logger.error("Task %s was never moved from the setscene queue" % x) 2059*4882a593Smuzhiyun err = True 2060*4882a593Smuzhiyun if x not in self.tasks_scenequeue_done: 2061*4882a593Smuzhiyun logger.error("Task %s was never processed by the setscene code" % x) 2062*4882a593Smuzhiyun err = True 2063*4882a593Smuzhiyun if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable: 2064*4882a593Smuzhiyun logger.error("Task %s was never marked as buildable by the setscene code" % x) 2065*4882a593Smuzhiyun err = True 2066*4882a593Smuzhiyun return err 2067*4882a593Smuzhiyun 2068*4882a593Smuzhiyun 2069*4882a593Smuzhiyun def execute(self): 2070*4882a593Smuzhiyun """ 2071*4882a593Smuzhiyun Run the tasks in a queue prepared by prepare_runqueue 2072*4882a593Smuzhiyun """ 2073*4882a593Smuzhiyun 2074*4882a593Smuzhiyun self.rq.read_workers() 2075*4882a593Smuzhiyun if self.updated_taskhash_queue or self.pending_migrations: 2076*4882a593Smuzhiyun self.process_possible_migrations() 2077*4882a593Smuzhiyun 2078*4882a593Smuzhiyun if not hasattr(self, "sorted_setscene_tids"): 2079*4882a593Smuzhiyun # Don't want to sort this set every execution 2080*4882a593Smuzhiyun self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids) 2081*4882a593Smuzhiyun 2082*4882a593Smuzhiyun task = None 2083*4882a593Smuzhiyun if not self.sqdone and self.can_start_task(): 2084*4882a593Smuzhiyun # Find the next setscene to run 2085*4882a593Smuzhiyun for nexttask in self.sorted_setscene_tids: 2086*4882a593Smuzhiyun if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values(): 2087*4882a593Smuzhiyun if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]): 2088*4882a593Smuzhiyun if nexttask not in self.rqdata.target_tids: 2089*4882a593Smuzhiyun logger.debug2("Skipping setscene for task %s" % nexttask) 2090*4882a593Smuzhiyun self.sq_task_skip(nexttask) 2091*4882a593Smuzhiyun self.scenequeue_notneeded.add(nexttask) 2092*4882a593Smuzhiyun if nexttask in self.sq_deferred: 2093*4882a593Smuzhiyun del self.sq_deferred[nexttask] 2094*4882a593Smuzhiyun return True 2095*4882a593Smuzhiyun # If covered tasks are running, need to wait for them to complete 2096*4882a593Smuzhiyun for t in self.sqdata.sq_covered_tasks[nexttask]: 2097*4882a593Smuzhiyun if t in self.runq_running and t not in self.runq_complete: 2098*4882a593Smuzhiyun continue 2099*4882a593Smuzhiyun if nexttask in self.sq_deferred: 2100*4882a593Smuzhiyun if self.sq_deferred[nexttask] not in self.runq_complete: 2101*4882a593Smuzhiyun continue 2102*4882a593Smuzhiyun logger.debug("Task %s no longer deferred" % nexttask) 2103*4882a593Smuzhiyun del self.sq_deferred[nexttask] 2104*4882a593Smuzhiyun valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False) 2105*4882a593Smuzhiyun if not valid: 2106*4882a593Smuzhiyun logger.debug("%s didn't become valid, skipping setscene" % nexttask) 2107*4882a593Smuzhiyun self.sq_task_failoutright(nexttask) 2108*4882a593Smuzhiyun return True 2109*4882a593Smuzhiyun if nexttask in self.sqdata.outrightfail: 2110*4882a593Smuzhiyun logger.debug2('No package found, so skipping setscene task %s', nexttask) 2111*4882a593Smuzhiyun self.sq_task_failoutright(nexttask) 2112*4882a593Smuzhiyun return True 2113*4882a593Smuzhiyun if nexttask in self.sqdata.unskippable: 2114*4882a593Smuzhiyun logger.debug2("Setscene task %s is unskippable" % nexttask) 2115*4882a593Smuzhiyun task = nexttask 2116*4882a593Smuzhiyun break 2117*4882a593Smuzhiyun if task is not None: 2118*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(task) 2119*4882a593Smuzhiyun taskname = taskname + "_setscene" 2120*4882a593Smuzhiyun if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache): 2121*4882a593Smuzhiyun logger.debug2('Stamp for underlying task %s is current, so skipping setscene variant', task) 2122*4882a593Smuzhiyun self.sq_task_failoutright(task) 2123*4882a593Smuzhiyun return True 2124*4882a593Smuzhiyun 2125*4882a593Smuzhiyun if self.cooker.configuration.force: 2126*4882a593Smuzhiyun if task in self.rqdata.target_tids: 2127*4882a593Smuzhiyun self.sq_task_failoutright(task) 2128*4882a593Smuzhiyun return True 2129*4882a593Smuzhiyun 2130*4882a593Smuzhiyun if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): 2131*4882a593Smuzhiyun logger.debug2('Setscene stamp current task %s, so skip it and its dependencies', task) 2132*4882a593Smuzhiyun self.sq_task_skip(task) 2133*4882a593Smuzhiyun return True 2134*4882a593Smuzhiyun 2135*4882a593Smuzhiyun if self.cooker.configuration.skipsetscene: 2136*4882a593Smuzhiyun logger.debug2('No setscene tasks should be executed. Skipping %s', task) 2137*4882a593Smuzhiyun self.sq_task_failoutright(task) 2138*4882a593Smuzhiyun return True 2139*4882a593Smuzhiyun 2140*4882a593Smuzhiyun startevent = sceneQueueTaskStarted(task, self.stats, self.rq) 2141*4882a593Smuzhiyun bb.event.fire(startevent, self.cfgData) 2142*4882a593Smuzhiyun 2143*4882a593Smuzhiyun taskdepdata = self.sq_build_taskdepdata(task) 2144*4882a593Smuzhiyun 2145*4882a593Smuzhiyun taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] 2146*4882a593Smuzhiyun taskhash = self.rqdata.get_task_hash(task) 2147*4882a593Smuzhiyun unihash = self.rqdata.get_task_unihash(task) 2148*4882a593Smuzhiyun if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: 2149*4882a593Smuzhiyun if not mc in self.rq.fakeworker: 2150*4882a593Smuzhiyun self.rq.start_fakeworker(self, mc) 2151*4882a593Smuzhiyun self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>") 2152*4882a593Smuzhiyun self.rq.fakeworker[mc].process.stdin.flush() 2153*4882a593Smuzhiyun else: 2154*4882a593Smuzhiyun self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>") 2155*4882a593Smuzhiyun self.rq.worker[mc].process.stdin.flush() 2156*4882a593Smuzhiyun 2157*4882a593Smuzhiyun self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) 2158*4882a593Smuzhiyun self.build_stamps2.append(self.build_stamps[task]) 2159*4882a593Smuzhiyun self.sq_running.add(task) 2160*4882a593Smuzhiyun self.sq_live.add(task) 2161*4882a593Smuzhiyun self.stats.updateActiveSetscene(len(self.sq_live)) 2162*4882a593Smuzhiyun if self.can_start_task(): 2163*4882a593Smuzhiyun return True 2164*4882a593Smuzhiyun 2165*4882a593Smuzhiyun self.update_holdofftasks() 2166*4882a593Smuzhiyun 2167*4882a593Smuzhiyun if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks: 2168*4882a593Smuzhiyun hashequiv_logger.verbose("Setscene tasks completed") 2169*4882a593Smuzhiyun 2170*4882a593Smuzhiyun err = self.summarise_scenequeue_errors() 2171*4882a593Smuzhiyun if err: 2172*4882a593Smuzhiyun self.rq.state = runQueueFailed 2173*4882a593Smuzhiyun return True 2174*4882a593Smuzhiyun 2175*4882a593Smuzhiyun if self.cooker.configuration.setsceneonly: 2176*4882a593Smuzhiyun self.rq.state = runQueueComplete 2177*4882a593Smuzhiyun return True 2178*4882a593Smuzhiyun self.sqdone = True 2179*4882a593Smuzhiyun 2180*4882a593Smuzhiyun if self.stats.total == 0: 2181*4882a593Smuzhiyun # nothing to do 2182*4882a593Smuzhiyun self.rq.state = runQueueComplete 2183*4882a593Smuzhiyun return True 2184*4882a593Smuzhiyun 2185*4882a593Smuzhiyun if self.cooker.configuration.setsceneonly: 2186*4882a593Smuzhiyun task = None 2187*4882a593Smuzhiyun else: 2188*4882a593Smuzhiyun task = self.sched.next() 2189*4882a593Smuzhiyun if task is not None: 2190*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(task) 2191*4882a593Smuzhiyun 2192*4882a593Smuzhiyun if self.rqdata.setscene_ignore_tasks is not None: 2193*4882a593Smuzhiyun if self.check_setscene_ignore_tasks(task): 2194*4882a593Smuzhiyun self.task_fail(task, "setscene ignore_tasks") 2195*4882a593Smuzhiyun return True 2196*4882a593Smuzhiyun 2197*4882a593Smuzhiyun if task in self.tasks_covered: 2198*4882a593Smuzhiyun logger.debug2("Setscene covered task %s", task) 2199*4882a593Smuzhiyun self.task_skip(task, "covered") 2200*4882a593Smuzhiyun return True 2201*4882a593Smuzhiyun 2202*4882a593Smuzhiyun if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): 2203*4882a593Smuzhiyun logger.debug2("Stamp current task %s", task) 2204*4882a593Smuzhiyun 2205*4882a593Smuzhiyun self.task_skip(task, "existing") 2206*4882a593Smuzhiyun self.runq_tasksrun.add(task) 2207*4882a593Smuzhiyun return True 2208*4882a593Smuzhiyun 2209*4882a593Smuzhiyun taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] 2210*4882a593Smuzhiyun if 'noexec' in taskdep and taskname in taskdep['noexec']: 2211*4882a593Smuzhiyun startevent = runQueueTaskStarted(task, self.stats, self.rq, 2212*4882a593Smuzhiyun noexec=True) 2213*4882a593Smuzhiyun bb.event.fire(startevent, self.cfgData) 2214*4882a593Smuzhiyun self.runq_running.add(task) 2215*4882a593Smuzhiyun self.stats.taskActive() 2216*4882a593Smuzhiyun if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): 2217*4882a593Smuzhiyun bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn) 2218*4882a593Smuzhiyun self.task_complete(task) 2219*4882a593Smuzhiyun return True 2220*4882a593Smuzhiyun else: 2221*4882a593Smuzhiyun startevent = runQueueTaskStarted(task, self.stats, self.rq) 2222*4882a593Smuzhiyun bb.event.fire(startevent, self.cfgData) 2223*4882a593Smuzhiyun 2224*4882a593Smuzhiyun taskdepdata = self.build_taskdepdata(task) 2225*4882a593Smuzhiyun 2226*4882a593Smuzhiyun taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] 2227*4882a593Smuzhiyun taskhash = self.rqdata.get_task_hash(task) 2228*4882a593Smuzhiyun unihash = self.rqdata.get_task_unihash(task) 2229*4882a593Smuzhiyun if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): 2230*4882a593Smuzhiyun if not mc in self.rq.fakeworker: 2231*4882a593Smuzhiyun try: 2232*4882a593Smuzhiyun self.rq.start_fakeworker(self, mc) 2233*4882a593Smuzhiyun except OSError as exc: 2234*4882a593Smuzhiyun logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc))) 2235*4882a593Smuzhiyun self.rq.state = runQueueFailed 2236*4882a593Smuzhiyun self.stats.taskFailed() 2237*4882a593Smuzhiyun return True 2238*4882a593Smuzhiyun self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") 2239*4882a593Smuzhiyun self.rq.fakeworker[mc].process.stdin.flush() 2240*4882a593Smuzhiyun else: 2241*4882a593Smuzhiyun self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") 2242*4882a593Smuzhiyun self.rq.worker[mc].process.stdin.flush() 2243*4882a593Smuzhiyun 2244*4882a593Smuzhiyun self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) 2245*4882a593Smuzhiyun self.build_stamps2.append(self.build_stamps[task]) 2246*4882a593Smuzhiyun self.runq_running.add(task) 2247*4882a593Smuzhiyun self.stats.taskActive() 2248*4882a593Smuzhiyun if self.can_start_task(): 2249*4882a593Smuzhiyun return True 2250*4882a593Smuzhiyun 2251*4882a593Smuzhiyun if self.stats.active > 0 or self.sq_live: 2252*4882a593Smuzhiyun self.rq.read_workers() 2253*4882a593Smuzhiyun return self.rq.active_fds() 2254*4882a593Smuzhiyun 2255*4882a593Smuzhiyun # No more tasks can be run. If we have deferred setscene tasks we should run them. 2256*4882a593Smuzhiyun if self.sq_deferred: 2257*4882a593Smuzhiyun deferred_tid = list(self.sq_deferred.keys())[0] 2258*4882a593Smuzhiyun blocking_tid = self.sq_deferred.pop(deferred_tid) 2259*4882a593Smuzhiyun logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s blocked by %s" % (deferred_tid, blocking_tid)) 2260*4882a593Smuzhiyun return True 2261*4882a593Smuzhiyun 2262*4882a593Smuzhiyun if self.failed_tids: 2263*4882a593Smuzhiyun self.rq.state = runQueueFailed 2264*4882a593Smuzhiyun return True 2265*4882a593Smuzhiyun 2266*4882a593Smuzhiyun # Sanity Checks 2267*4882a593Smuzhiyun err = self.summarise_scenequeue_errors() 2268*4882a593Smuzhiyun for task in self.rqdata.runtaskentries: 2269*4882a593Smuzhiyun if task not in self.runq_buildable: 2270*4882a593Smuzhiyun logger.error("Task %s never buildable!", task) 2271*4882a593Smuzhiyun err = True 2272*4882a593Smuzhiyun elif task not in self.runq_running: 2273*4882a593Smuzhiyun logger.error("Task %s never ran!", task) 2274*4882a593Smuzhiyun err = True 2275*4882a593Smuzhiyun elif task not in self.runq_complete: 2276*4882a593Smuzhiyun logger.error("Task %s never completed!", task) 2277*4882a593Smuzhiyun err = True 2278*4882a593Smuzhiyun 2279*4882a593Smuzhiyun if err: 2280*4882a593Smuzhiyun self.rq.state = runQueueFailed 2281*4882a593Smuzhiyun else: 2282*4882a593Smuzhiyun self.rq.state = runQueueComplete 2283*4882a593Smuzhiyun 2284*4882a593Smuzhiyun return True 2285*4882a593Smuzhiyun 2286*4882a593Smuzhiyun def filtermcdeps(self, task, mc, deps): 2287*4882a593Smuzhiyun ret = set() 2288*4882a593Smuzhiyun for dep in deps: 2289*4882a593Smuzhiyun thismc = mc_from_tid(dep) 2290*4882a593Smuzhiyun if thismc != mc: 2291*4882a593Smuzhiyun continue 2292*4882a593Smuzhiyun ret.add(dep) 2293*4882a593Smuzhiyun return ret 2294*4882a593Smuzhiyun 2295*4882a593Smuzhiyun # We filter out multiconfig dependencies from taskdepdata we pass to the tasks 2296*4882a593Smuzhiyun # as most code can't handle them 2297*4882a593Smuzhiyun def build_taskdepdata(self, task): 2298*4882a593Smuzhiyun taskdepdata = {} 2299*4882a593Smuzhiyun mc = mc_from_tid(task) 2300*4882a593Smuzhiyun next = self.rqdata.runtaskentries[task].depends.copy() 2301*4882a593Smuzhiyun next.add(task) 2302*4882a593Smuzhiyun next = self.filtermcdeps(task, mc, next) 2303*4882a593Smuzhiyun while next: 2304*4882a593Smuzhiyun additional = [] 2305*4882a593Smuzhiyun for revdep in next: 2306*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep) 2307*4882a593Smuzhiyun pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] 2308*4882a593Smuzhiyun deps = self.rqdata.runtaskentries[revdep].depends 2309*4882a593Smuzhiyun provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] 2310*4882a593Smuzhiyun taskhash = self.rqdata.runtaskentries[revdep].hash 2311*4882a593Smuzhiyun unihash = self.rqdata.runtaskentries[revdep].unihash 2312*4882a593Smuzhiyun deps = self.filtermcdeps(task, mc, deps) 2313*4882a593Smuzhiyun taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash] 2314*4882a593Smuzhiyun for revdep2 in deps: 2315*4882a593Smuzhiyun if revdep2 not in taskdepdata: 2316*4882a593Smuzhiyun additional.append(revdep2) 2317*4882a593Smuzhiyun next = additional 2318*4882a593Smuzhiyun 2319*4882a593Smuzhiyun #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) 2320*4882a593Smuzhiyun return taskdepdata 2321*4882a593Smuzhiyun 2322*4882a593Smuzhiyun def update_holdofftasks(self): 2323*4882a593Smuzhiyun 2324*4882a593Smuzhiyun if not self.holdoff_need_update: 2325*4882a593Smuzhiyun return 2326*4882a593Smuzhiyun 2327*4882a593Smuzhiyun notcovered = set(self.scenequeue_notcovered) 2328*4882a593Smuzhiyun notcovered |= self.cantskip 2329*4882a593Smuzhiyun for tid in self.scenequeue_notcovered: 2330*4882a593Smuzhiyun notcovered |= self.sqdata.sq_covered_tasks[tid] 2331*4882a593Smuzhiyun notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids) 2332*4882a593Smuzhiyun notcovered.intersection_update(self.tasks_scenequeue_done) 2333*4882a593Smuzhiyun 2334*4882a593Smuzhiyun covered = set(self.scenequeue_covered) 2335*4882a593Smuzhiyun for tid in self.scenequeue_covered: 2336*4882a593Smuzhiyun covered |= self.sqdata.sq_covered_tasks[tid] 2337*4882a593Smuzhiyun covered.difference_update(notcovered) 2338*4882a593Smuzhiyun covered.intersection_update(self.tasks_scenequeue_done) 2339*4882a593Smuzhiyun 2340*4882a593Smuzhiyun for tid in notcovered | covered: 2341*4882a593Smuzhiyun if not self.rqdata.runtaskentries[tid].depends: 2342*4882a593Smuzhiyun self.setbuildable(tid) 2343*4882a593Smuzhiyun elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete): 2344*4882a593Smuzhiyun self.setbuildable(tid) 2345*4882a593Smuzhiyun 2346*4882a593Smuzhiyun self.tasks_covered = covered 2347*4882a593Smuzhiyun self.tasks_notcovered = notcovered 2348*4882a593Smuzhiyun 2349*4882a593Smuzhiyun self.holdoff_tasks = set() 2350*4882a593Smuzhiyun 2351*4882a593Smuzhiyun for tid in self.rqdata.runq_setscene_tids: 2352*4882a593Smuzhiyun if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered: 2353*4882a593Smuzhiyun self.holdoff_tasks.add(tid) 2354*4882a593Smuzhiyun 2355*4882a593Smuzhiyun for tid in self.holdoff_tasks.copy(): 2356*4882a593Smuzhiyun for dep in self.sqdata.sq_covered_tasks[tid]: 2357*4882a593Smuzhiyun if dep not in self.runq_complete: 2358*4882a593Smuzhiyun self.holdoff_tasks.add(dep) 2359*4882a593Smuzhiyun 2360*4882a593Smuzhiyun self.holdoff_need_update = False 2361*4882a593Smuzhiyun 2362*4882a593Smuzhiyun def process_possible_migrations(self): 2363*4882a593Smuzhiyun 2364*4882a593Smuzhiyun changed = set() 2365*4882a593Smuzhiyun toprocess = set() 2366*4882a593Smuzhiyun for tid, unihash in self.updated_taskhash_queue.copy(): 2367*4882a593Smuzhiyun if tid in self.runq_running and tid not in self.runq_complete: 2368*4882a593Smuzhiyun continue 2369*4882a593Smuzhiyun 2370*4882a593Smuzhiyun self.updated_taskhash_queue.remove((tid, unihash)) 2371*4882a593Smuzhiyun 2372*4882a593Smuzhiyun if unihash != self.rqdata.runtaskentries[tid].unihash: 2373*4882a593Smuzhiyun # Make sure we rehash any other tasks with the same task hash that we're deferred against. 2374*4882a593Smuzhiyun torehash = [tid] 2375*4882a593Smuzhiyun for deftid in self.sq_deferred: 2376*4882a593Smuzhiyun if self.sq_deferred[deftid] == tid: 2377*4882a593Smuzhiyun torehash.append(deftid) 2378*4882a593Smuzhiyun for hashtid in torehash: 2379*4882a593Smuzhiyun hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash)) 2380*4882a593Smuzhiyun self.rqdata.runtaskentries[hashtid].unihash = unihash 2381*4882a593Smuzhiyun bb.parse.siggen.set_unihash(hashtid, unihash) 2382*4882a593Smuzhiyun toprocess.add(hashtid) 2383*4882a593Smuzhiyun if torehash: 2384*4882a593Smuzhiyun # Need to save after set_unihash above 2385*4882a593Smuzhiyun bb.parse.siggen.save_unitaskhashes() 2386*4882a593Smuzhiyun 2387*4882a593Smuzhiyun # Work out all tasks which depend upon these 2388*4882a593Smuzhiyun total = set() 2389*4882a593Smuzhiyun next = set() 2390*4882a593Smuzhiyun for p in toprocess: 2391*4882a593Smuzhiyun next |= self.rqdata.runtaskentries[p].revdeps 2392*4882a593Smuzhiyun while next: 2393*4882a593Smuzhiyun current = next.copy() 2394*4882a593Smuzhiyun total = total | next 2395*4882a593Smuzhiyun next = set() 2396*4882a593Smuzhiyun for ntid in current: 2397*4882a593Smuzhiyun next |= self.rqdata.runtaskentries[ntid].revdeps 2398*4882a593Smuzhiyun next.difference_update(total) 2399*4882a593Smuzhiyun 2400*4882a593Smuzhiyun # Now iterate those tasks in dependency order to regenerate their taskhash/unihash 2401*4882a593Smuzhiyun next = set() 2402*4882a593Smuzhiyun for p in total: 2403*4882a593Smuzhiyun if not self.rqdata.runtaskentries[p].depends: 2404*4882a593Smuzhiyun next.add(p) 2405*4882a593Smuzhiyun elif self.rqdata.runtaskentries[p].depends.isdisjoint(total): 2406*4882a593Smuzhiyun next.add(p) 2407*4882a593Smuzhiyun 2408*4882a593Smuzhiyun # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled 2409*4882a593Smuzhiyun while next: 2410*4882a593Smuzhiyun current = next.copy() 2411*4882a593Smuzhiyun next = set() 2412*4882a593Smuzhiyun for tid in current: 2413*4882a593Smuzhiyun if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total): 2414*4882a593Smuzhiyun continue 2415*4882a593Smuzhiyun orighash = self.rqdata.runtaskentries[tid].hash 2416*4882a593Smuzhiyun dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid)) 2417*4882a593Smuzhiyun newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc) 2418*4882a593Smuzhiyun origuni = self.rqdata.runtaskentries[tid].unihash 2419*4882a593Smuzhiyun newuni = bb.parse.siggen.get_unihash(tid) 2420*4882a593Smuzhiyun # FIXME, need to check it can come from sstate at all for determinism? 2421*4882a593Smuzhiyun remapped = False 2422*4882a593Smuzhiyun if newuni == origuni: 2423*4882a593Smuzhiyun # Nothing to do, we match, skip code below 2424*4882a593Smuzhiyun remapped = True 2425*4882a593Smuzhiyun elif tid in self.scenequeue_covered or tid in self.sq_live: 2426*4882a593Smuzhiyun # Already ran this setscene task or it running. Report the new taskhash 2427*4882a593Smuzhiyun bb.parse.siggen.report_unihash_equiv(tid, newhash, origuni, newuni, self.rqdata.dataCaches) 2428*4882a593Smuzhiyun hashequiv_logger.verbose("Already covered setscene for %s so ignoring rehash (remap)" % (tid)) 2429*4882a593Smuzhiyun remapped = True 2430*4882a593Smuzhiyun 2431*4882a593Smuzhiyun if not remapped: 2432*4882a593Smuzhiyun #logger.debug("Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni)) 2433*4882a593Smuzhiyun self.rqdata.runtaskentries[tid].hash = newhash 2434*4882a593Smuzhiyun self.rqdata.runtaskentries[tid].unihash = newuni 2435*4882a593Smuzhiyun changed.add(tid) 2436*4882a593Smuzhiyun 2437*4882a593Smuzhiyun next |= self.rqdata.runtaskentries[tid].revdeps 2438*4882a593Smuzhiyun total.remove(tid) 2439*4882a593Smuzhiyun next.intersection_update(total) 2440*4882a593Smuzhiyun 2441*4882a593Smuzhiyun if changed: 2442*4882a593Smuzhiyun for mc in self.rq.worker: 2443*4882a593Smuzhiyun self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>") 2444*4882a593Smuzhiyun for mc in self.rq.fakeworker: 2445*4882a593Smuzhiyun self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>") 2446*4882a593Smuzhiyun 2447*4882a593Smuzhiyun hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed))) 2448*4882a593Smuzhiyun 2449*4882a593Smuzhiyun for tid in changed: 2450*4882a593Smuzhiyun if tid not in self.rqdata.runq_setscene_tids: 2451*4882a593Smuzhiyun continue 2452*4882a593Smuzhiyun if tid not in self.pending_migrations: 2453*4882a593Smuzhiyun self.pending_migrations.add(tid) 2454*4882a593Smuzhiyun 2455*4882a593Smuzhiyun update_tasks = [] 2456*4882a593Smuzhiyun for tid in self.pending_migrations.copy(): 2457*4882a593Smuzhiyun if tid in self.runq_running or tid in self.sq_live: 2458*4882a593Smuzhiyun # Too late, task already running, not much we can do now 2459*4882a593Smuzhiyun self.pending_migrations.remove(tid) 2460*4882a593Smuzhiyun continue 2461*4882a593Smuzhiyun 2462*4882a593Smuzhiyun valid = True 2463*4882a593Smuzhiyun # Check no tasks this covers are running 2464*4882a593Smuzhiyun for dep in self.sqdata.sq_covered_tasks[tid]: 2465*4882a593Smuzhiyun if dep in self.runq_running and dep not in self.runq_complete: 2466*4882a593Smuzhiyun hashequiv_logger.debug2("Task %s is running which blocks setscene for %s from running" % (dep, tid)) 2467*4882a593Smuzhiyun valid = False 2468*4882a593Smuzhiyun break 2469*4882a593Smuzhiyun if not valid: 2470*4882a593Smuzhiyun continue 2471*4882a593Smuzhiyun 2472*4882a593Smuzhiyun self.pending_migrations.remove(tid) 2473*4882a593Smuzhiyun changed = True 2474*4882a593Smuzhiyun 2475*4882a593Smuzhiyun if tid in self.tasks_scenequeue_done: 2476*4882a593Smuzhiyun self.tasks_scenequeue_done.remove(tid) 2477*4882a593Smuzhiyun for dep in self.sqdata.sq_covered_tasks[tid]: 2478*4882a593Smuzhiyun if dep in self.runq_complete and dep not in self.runq_tasksrun: 2479*4882a593Smuzhiyun bb.error("Task %s marked as completed but now needing to rerun? Halting build." % dep) 2480*4882a593Smuzhiyun self.failed_tids.append(tid) 2481*4882a593Smuzhiyun self.rq.state = runQueueCleanUp 2482*4882a593Smuzhiyun return 2483*4882a593Smuzhiyun 2484*4882a593Smuzhiyun if dep not in self.runq_complete: 2485*4882a593Smuzhiyun if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable: 2486*4882a593Smuzhiyun self.tasks_scenequeue_done.remove(dep) 2487*4882a593Smuzhiyun 2488*4882a593Smuzhiyun if tid in self.sq_buildable: 2489*4882a593Smuzhiyun self.sq_buildable.remove(tid) 2490*4882a593Smuzhiyun if tid in self.sq_running: 2491*4882a593Smuzhiyun self.sq_running.remove(tid) 2492*4882a593Smuzhiyun if tid in self.sqdata.outrightfail: 2493*4882a593Smuzhiyun self.sqdata.outrightfail.remove(tid) 2494*4882a593Smuzhiyun if tid in self.scenequeue_notcovered: 2495*4882a593Smuzhiyun self.scenequeue_notcovered.remove(tid) 2496*4882a593Smuzhiyun if tid in self.scenequeue_covered: 2497*4882a593Smuzhiyun self.scenequeue_covered.remove(tid) 2498*4882a593Smuzhiyun if tid in self.scenequeue_notneeded: 2499*4882a593Smuzhiyun self.scenequeue_notneeded.remove(tid) 2500*4882a593Smuzhiyun 2501*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 2502*4882a593Smuzhiyun self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True) 2503*4882a593Smuzhiyun 2504*4882a593Smuzhiyun if tid in self.stampcache: 2505*4882a593Smuzhiyun del self.stampcache[tid] 2506*4882a593Smuzhiyun 2507*4882a593Smuzhiyun if tid in self.build_stamps: 2508*4882a593Smuzhiyun del self.build_stamps[tid] 2509*4882a593Smuzhiyun 2510*4882a593Smuzhiyun update_tasks.append(tid) 2511*4882a593Smuzhiyun 2512*4882a593Smuzhiyun update_tasks2 = [] 2513*4882a593Smuzhiyun for tid in update_tasks: 2514*4882a593Smuzhiyun harddepfail = False 2515*4882a593Smuzhiyun for t in self.sqdata.sq_harddeps: 2516*4882a593Smuzhiyun if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered: 2517*4882a593Smuzhiyun harddepfail = True 2518*4882a593Smuzhiyun break 2519*4882a593Smuzhiyun if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered): 2520*4882a593Smuzhiyun if tid not in self.sq_buildable: 2521*4882a593Smuzhiyun self.sq_buildable.add(tid) 2522*4882a593Smuzhiyun if not self.sqdata.sq_revdeps[tid]: 2523*4882a593Smuzhiyun self.sq_buildable.add(tid) 2524*4882a593Smuzhiyun 2525*4882a593Smuzhiyun update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid)) 2526*4882a593Smuzhiyun 2527*4882a593Smuzhiyun if update_tasks2: 2528*4882a593Smuzhiyun self.sqdone = False 2529*4882a593Smuzhiyun for mc in sorted(self.sqdata.multiconfigs): 2530*4882a593Smuzhiyun for tid in sorted([t[0] for t in update_tasks2]): 2531*4882a593Smuzhiyun if mc_from_tid(tid) != mc: 2532*4882a593Smuzhiyun continue 2533*4882a593Smuzhiyun h = pending_hash_index(tid, self.rqdata) 2534*4882a593Smuzhiyun if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]: 2535*4882a593Smuzhiyun self.sq_deferred[tid] = self.sqdata.hashes[h] 2536*4882a593Smuzhiyun bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h])) 2537*4882a593Smuzhiyun update_scenequeue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) 2538*4882a593Smuzhiyun 2539*4882a593Smuzhiyun for (tid, harddepfail, origvalid) in update_tasks2: 2540*4882a593Smuzhiyun if tid in self.sqdata.valid and not origvalid: 2541*4882a593Smuzhiyun hashequiv_logger.verbose("Setscene task %s became valid" % tid) 2542*4882a593Smuzhiyun if harddepfail: 2543*4882a593Smuzhiyun self.sq_task_failoutright(tid) 2544*4882a593Smuzhiyun 2545*4882a593Smuzhiyun if changed: 2546*4882a593Smuzhiyun self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) 2547*4882a593Smuzhiyun self.holdoff_need_update = True 2548*4882a593Smuzhiyun 2549*4882a593Smuzhiyun def scenequeue_updatecounters(self, task, fail=False): 2550*4882a593Smuzhiyun 2551*4882a593Smuzhiyun for dep in sorted(self.sqdata.sq_deps[task]): 2552*4882a593Smuzhiyun if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]: 2553*4882a593Smuzhiyun if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered: 2554*4882a593Smuzhiyun # dependency could be already processed, e.g. noexec setscene task 2555*4882a593Smuzhiyun continue 2556*4882a593Smuzhiyun noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache) 2557*4882a593Smuzhiyun if noexec or stamppresent: 2558*4882a593Smuzhiyun continue 2559*4882a593Smuzhiyun logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep)) 2560*4882a593Smuzhiyun self.sq_task_failoutright(dep) 2561*4882a593Smuzhiyun continue 2562*4882a593Smuzhiyun if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered): 2563*4882a593Smuzhiyun if dep not in self.sq_buildable: 2564*4882a593Smuzhiyun self.sq_buildable.add(dep) 2565*4882a593Smuzhiyun 2566*4882a593Smuzhiyun next = set([task]) 2567*4882a593Smuzhiyun while next: 2568*4882a593Smuzhiyun new = set() 2569*4882a593Smuzhiyun for t in sorted(next): 2570*4882a593Smuzhiyun self.tasks_scenequeue_done.add(t) 2571*4882a593Smuzhiyun # Look down the dependency chain for non-setscene things which this task depends on 2572*4882a593Smuzhiyun # and mark as 'done' 2573*4882a593Smuzhiyun for dep in self.rqdata.runtaskentries[t].depends: 2574*4882a593Smuzhiyun if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done: 2575*4882a593Smuzhiyun continue 2576*4882a593Smuzhiyun if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done): 2577*4882a593Smuzhiyun new.add(dep) 2578*4882a593Smuzhiyun next = new 2579*4882a593Smuzhiyun 2580*4882a593Smuzhiyun self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) 2581*4882a593Smuzhiyun self.holdoff_need_update = True 2582*4882a593Smuzhiyun 2583*4882a593Smuzhiyun def sq_task_completeoutright(self, task): 2584*4882a593Smuzhiyun """ 2585*4882a593Smuzhiyun Mark a task as completed 2586*4882a593Smuzhiyun Look at the reverse dependencies and mark any task with 2587*4882a593Smuzhiyun completed dependencies as buildable 2588*4882a593Smuzhiyun """ 2589*4882a593Smuzhiyun 2590*4882a593Smuzhiyun logger.debug('Found task %s which could be accelerated', task) 2591*4882a593Smuzhiyun self.scenequeue_covered.add(task) 2592*4882a593Smuzhiyun self.scenequeue_updatecounters(task) 2593*4882a593Smuzhiyun 2594*4882a593Smuzhiyun def sq_check_taskfail(self, task): 2595*4882a593Smuzhiyun if self.rqdata.setscene_ignore_tasks is not None: 2596*4882a593Smuzhiyun realtask = task.split('_setscene')[0] 2597*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) 2598*4882a593Smuzhiyun pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] 2599*4882a593Smuzhiyun if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): 2600*4882a593Smuzhiyun logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) 2601*4882a593Smuzhiyun self.rq.state = runQueueCleanUp 2602*4882a593Smuzhiyun 2603*4882a593Smuzhiyun def sq_task_complete(self, task): 2604*4882a593Smuzhiyun bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) 2605*4882a593Smuzhiyun self.sq_task_completeoutright(task) 2606*4882a593Smuzhiyun 2607*4882a593Smuzhiyun def sq_task_fail(self, task, result): 2608*4882a593Smuzhiyun bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData) 2609*4882a593Smuzhiyun self.scenequeue_notcovered.add(task) 2610*4882a593Smuzhiyun self.scenequeue_updatecounters(task, True) 2611*4882a593Smuzhiyun self.sq_check_taskfail(task) 2612*4882a593Smuzhiyun 2613*4882a593Smuzhiyun def sq_task_failoutright(self, task): 2614*4882a593Smuzhiyun self.sq_running.add(task) 2615*4882a593Smuzhiyun self.sq_buildable.add(task) 2616*4882a593Smuzhiyun self.scenequeue_notcovered.add(task) 2617*4882a593Smuzhiyun self.scenequeue_updatecounters(task, True) 2618*4882a593Smuzhiyun 2619*4882a593Smuzhiyun def sq_task_skip(self, task): 2620*4882a593Smuzhiyun self.sq_running.add(task) 2621*4882a593Smuzhiyun self.sq_buildable.add(task) 2622*4882a593Smuzhiyun self.sq_task_completeoutright(task) 2623*4882a593Smuzhiyun 2624*4882a593Smuzhiyun def sq_build_taskdepdata(self, task): 2625*4882a593Smuzhiyun def getsetscenedeps(tid): 2626*4882a593Smuzhiyun deps = set() 2627*4882a593Smuzhiyun (mc, fn, taskname, _) = split_tid_mcfn(tid) 2628*4882a593Smuzhiyun realtid = tid + "_setscene" 2629*4882a593Smuzhiyun idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends 2630*4882a593Smuzhiyun for (depname, idependtask) in idepends: 2631*4882a593Smuzhiyun if depname not in self.rqdata.taskData[mc].build_targets: 2632*4882a593Smuzhiyun continue 2633*4882a593Smuzhiyun 2634*4882a593Smuzhiyun depfn = self.rqdata.taskData[mc].build_targets[depname][0] 2635*4882a593Smuzhiyun if depfn is None: 2636*4882a593Smuzhiyun continue 2637*4882a593Smuzhiyun deptid = depfn + ":" + idependtask.replace("_setscene", "") 2638*4882a593Smuzhiyun deps.add(deptid) 2639*4882a593Smuzhiyun return deps 2640*4882a593Smuzhiyun 2641*4882a593Smuzhiyun taskdepdata = {} 2642*4882a593Smuzhiyun next = getsetscenedeps(task) 2643*4882a593Smuzhiyun next.add(task) 2644*4882a593Smuzhiyun while next: 2645*4882a593Smuzhiyun additional = [] 2646*4882a593Smuzhiyun for revdep in next: 2647*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep) 2648*4882a593Smuzhiyun pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] 2649*4882a593Smuzhiyun deps = getsetscenedeps(revdep) 2650*4882a593Smuzhiyun provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] 2651*4882a593Smuzhiyun taskhash = self.rqdata.runtaskentries[revdep].hash 2652*4882a593Smuzhiyun unihash = self.rqdata.runtaskentries[revdep].unihash 2653*4882a593Smuzhiyun taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash] 2654*4882a593Smuzhiyun for revdep2 in deps: 2655*4882a593Smuzhiyun if revdep2 not in taskdepdata: 2656*4882a593Smuzhiyun additional.append(revdep2) 2657*4882a593Smuzhiyun next = additional 2658*4882a593Smuzhiyun 2659*4882a593Smuzhiyun #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) 2660*4882a593Smuzhiyun return taskdepdata 2661*4882a593Smuzhiyun 2662*4882a593Smuzhiyun def check_setscene_ignore_tasks(self, tid): 2663*4882a593Smuzhiyun # Check task that is going to run against the ignore tasks list 2664*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 2665*4882a593Smuzhiyun # Ignore covered tasks 2666*4882a593Smuzhiyun if tid in self.tasks_covered: 2667*4882a593Smuzhiyun return False 2668*4882a593Smuzhiyun # Ignore stamped tasks 2669*4882a593Smuzhiyun if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache): 2670*4882a593Smuzhiyun return False 2671*4882a593Smuzhiyun # Ignore noexec tasks 2672*4882a593Smuzhiyun taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] 2673*4882a593Smuzhiyun if 'noexec' in taskdep and taskname in taskdep['noexec']: 2674*4882a593Smuzhiyun return False 2675*4882a593Smuzhiyun 2676*4882a593Smuzhiyun pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] 2677*4882a593Smuzhiyun if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): 2678*4882a593Smuzhiyun if tid in self.rqdata.runq_setscene_tids: 2679*4882a593Smuzhiyun msg = ['Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)] 2680*4882a593Smuzhiyun else: 2681*4882a593Smuzhiyun msg = ['Task %s.%s attempted to execute unexpectedly' % (pn, taskname)] 2682*4882a593Smuzhiyun for t in self.scenequeue_notcovered: 2683*4882a593Smuzhiyun msg.append("\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)) 2684*4882a593Smuzhiyun msg.append('\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered)) 2685*4882a593Smuzhiyun logger.error("".join(msg)) 2686*4882a593Smuzhiyun return True 2687*4882a593Smuzhiyun return False 2688*4882a593Smuzhiyun 2689*4882a593Smuzhiyunclass SQData(object): 2690*4882a593Smuzhiyun def __init__(self): 2691*4882a593Smuzhiyun # SceneQueue dependencies 2692*4882a593Smuzhiyun self.sq_deps = {} 2693*4882a593Smuzhiyun # SceneQueue reverse dependencies 2694*4882a593Smuzhiyun self.sq_revdeps = {} 2695*4882a593Smuzhiyun # Injected inter-setscene task dependencies 2696*4882a593Smuzhiyun self.sq_harddeps = {} 2697*4882a593Smuzhiyun # Cache of stamp files so duplicates can't run in parallel 2698*4882a593Smuzhiyun self.stamps = {} 2699*4882a593Smuzhiyun # Setscene tasks directly depended upon by the build 2700*4882a593Smuzhiyun self.unskippable = set() 2701*4882a593Smuzhiyun # List of setscene tasks which aren't present 2702*4882a593Smuzhiyun self.outrightfail = set() 2703*4882a593Smuzhiyun # A list of normal tasks a setscene task covers 2704*4882a593Smuzhiyun self.sq_covered_tasks = {} 2705*4882a593Smuzhiyun 2706*4882a593Smuzhiyundef build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq): 2707*4882a593Smuzhiyun 2708*4882a593Smuzhiyun sq_revdeps = {} 2709*4882a593Smuzhiyun sq_revdeps_squash = {} 2710*4882a593Smuzhiyun sq_collated_deps = {} 2711*4882a593Smuzhiyun 2712*4882a593Smuzhiyun # We need to construct a dependency graph for the setscene functions. Intermediate 2713*4882a593Smuzhiyun # dependencies between the setscene tasks only complicate the code. This code 2714*4882a593Smuzhiyun # therefore aims to collapse the huge runqueue dependency tree into a smaller one 2715*4882a593Smuzhiyun # only containing the setscene functions. 2716*4882a593Smuzhiyun 2717*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2718*4882a593Smuzhiyun 2719*4882a593Smuzhiyun # First process the chains up to the first setscene task. 2720*4882a593Smuzhiyun endpoints = {} 2721*4882a593Smuzhiyun for tid in rqdata.runtaskentries: 2722*4882a593Smuzhiyun sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps) 2723*4882a593Smuzhiyun sq_revdeps_squash[tid] = set() 2724*4882a593Smuzhiyun if not sq_revdeps[tid] and tid not in rqdata.runq_setscene_tids: 2725*4882a593Smuzhiyun #bb.warn("Added endpoint %s" % (tid)) 2726*4882a593Smuzhiyun endpoints[tid] = set() 2727*4882a593Smuzhiyun 2728*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2729*4882a593Smuzhiyun 2730*4882a593Smuzhiyun # Secondly process the chains between setscene tasks. 2731*4882a593Smuzhiyun for tid in rqdata.runq_setscene_tids: 2732*4882a593Smuzhiyun sq_collated_deps[tid] = set() 2733*4882a593Smuzhiyun #bb.warn("Added endpoint 2 %s" % (tid)) 2734*4882a593Smuzhiyun for dep in rqdata.runtaskentries[tid].depends: 2735*4882a593Smuzhiyun if tid in sq_revdeps[dep]: 2736*4882a593Smuzhiyun sq_revdeps[dep].remove(tid) 2737*4882a593Smuzhiyun if dep not in endpoints: 2738*4882a593Smuzhiyun endpoints[dep] = set() 2739*4882a593Smuzhiyun #bb.warn(" Added endpoint 3 %s" % (dep)) 2740*4882a593Smuzhiyun endpoints[dep].add(tid) 2741*4882a593Smuzhiyun 2742*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2743*4882a593Smuzhiyun 2744*4882a593Smuzhiyun def process_endpoints(endpoints): 2745*4882a593Smuzhiyun newendpoints = {} 2746*4882a593Smuzhiyun for point, task in endpoints.items(): 2747*4882a593Smuzhiyun tasks = set() 2748*4882a593Smuzhiyun if task: 2749*4882a593Smuzhiyun tasks |= task 2750*4882a593Smuzhiyun if sq_revdeps_squash[point]: 2751*4882a593Smuzhiyun tasks |= sq_revdeps_squash[point] 2752*4882a593Smuzhiyun if point not in rqdata.runq_setscene_tids: 2753*4882a593Smuzhiyun for t in tasks: 2754*4882a593Smuzhiyun sq_collated_deps[t].add(point) 2755*4882a593Smuzhiyun sq_revdeps_squash[point] = set() 2756*4882a593Smuzhiyun if point in rqdata.runq_setscene_tids: 2757*4882a593Smuzhiyun sq_revdeps_squash[point] = tasks 2758*4882a593Smuzhiyun continue 2759*4882a593Smuzhiyun for dep in rqdata.runtaskentries[point].depends: 2760*4882a593Smuzhiyun if point in sq_revdeps[dep]: 2761*4882a593Smuzhiyun sq_revdeps[dep].remove(point) 2762*4882a593Smuzhiyun if tasks: 2763*4882a593Smuzhiyun sq_revdeps_squash[dep] |= tasks 2764*4882a593Smuzhiyun if not sq_revdeps[dep] and dep not in rqdata.runq_setscene_tids: 2765*4882a593Smuzhiyun newendpoints[dep] = task 2766*4882a593Smuzhiyun if newendpoints: 2767*4882a593Smuzhiyun process_endpoints(newendpoints) 2768*4882a593Smuzhiyun 2769*4882a593Smuzhiyun process_endpoints(endpoints) 2770*4882a593Smuzhiyun 2771*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2772*4882a593Smuzhiyun 2773*4882a593Smuzhiyun # Build a list of tasks which are "unskippable" 2774*4882a593Smuzhiyun # These are direct endpoints referenced by the build upto and including setscene tasks 2775*4882a593Smuzhiyun # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon 2776*4882a593Smuzhiyun new = True 2777*4882a593Smuzhiyun for tid in rqdata.runtaskentries: 2778*4882a593Smuzhiyun if not rqdata.runtaskentries[tid].revdeps: 2779*4882a593Smuzhiyun sqdata.unskippable.add(tid) 2780*4882a593Smuzhiyun sqdata.unskippable |= sqrq.cantskip 2781*4882a593Smuzhiyun while new: 2782*4882a593Smuzhiyun new = False 2783*4882a593Smuzhiyun orig = sqdata.unskippable.copy() 2784*4882a593Smuzhiyun for tid in sorted(orig, reverse=True): 2785*4882a593Smuzhiyun if tid in rqdata.runq_setscene_tids: 2786*4882a593Smuzhiyun continue 2787*4882a593Smuzhiyun if not rqdata.runtaskentries[tid].depends: 2788*4882a593Smuzhiyun # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable 2789*4882a593Smuzhiyun sqrq.setbuildable(tid) 2790*4882a593Smuzhiyun sqdata.unskippable |= rqdata.runtaskentries[tid].depends 2791*4882a593Smuzhiyun if sqdata.unskippable != orig: 2792*4882a593Smuzhiyun new = True 2793*4882a593Smuzhiyun 2794*4882a593Smuzhiyun sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids) 2795*4882a593Smuzhiyun 2796*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries)) 2797*4882a593Smuzhiyun 2798*4882a593Smuzhiyun # Sanity check all dependencies could be changed to setscene task references 2799*4882a593Smuzhiyun for taskcounter, tid in enumerate(rqdata.runtaskentries): 2800*4882a593Smuzhiyun if tid in rqdata.runq_setscene_tids: 2801*4882a593Smuzhiyun pass 2802*4882a593Smuzhiyun elif sq_revdeps_squash[tid]: 2803*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, halting. Please report this problem.") 2804*4882a593Smuzhiyun else: 2805*4882a593Smuzhiyun del sq_revdeps_squash[tid] 2806*4882a593Smuzhiyun rqdata.init_progress_reporter.update(taskcounter) 2807*4882a593Smuzhiyun 2808*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2809*4882a593Smuzhiyun 2810*4882a593Smuzhiyun # Resolve setscene inter-task dependencies 2811*4882a593Smuzhiyun # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene" 2812*4882a593Smuzhiyun # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies 2813*4882a593Smuzhiyun for tid in rqdata.runq_setscene_tids: 2814*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 2815*4882a593Smuzhiyun realtid = tid + "_setscene" 2816*4882a593Smuzhiyun idepends = rqdata.taskData[mc].taskentries[realtid].idepends 2817*4882a593Smuzhiyun sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True) 2818*4882a593Smuzhiyun for (depname, idependtask) in idepends: 2819*4882a593Smuzhiyun 2820*4882a593Smuzhiyun if depname not in rqdata.taskData[mc].build_targets: 2821*4882a593Smuzhiyun continue 2822*4882a593Smuzhiyun 2823*4882a593Smuzhiyun depfn = rqdata.taskData[mc].build_targets[depname][0] 2824*4882a593Smuzhiyun if depfn is None: 2825*4882a593Smuzhiyun continue 2826*4882a593Smuzhiyun deptid = depfn + ":" + idependtask.replace("_setscene", "") 2827*4882a593Smuzhiyun if deptid not in rqdata.runtaskentries: 2828*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask)) 2829*4882a593Smuzhiyun 2830*4882a593Smuzhiyun if not deptid in sqdata.sq_harddeps: 2831*4882a593Smuzhiyun sqdata.sq_harddeps[deptid] = set() 2832*4882a593Smuzhiyun sqdata.sq_harddeps[deptid].add(tid) 2833*4882a593Smuzhiyun 2834*4882a593Smuzhiyun sq_revdeps_squash[tid].add(deptid) 2835*4882a593Smuzhiyun # Have to zero this to avoid circular dependencies 2836*4882a593Smuzhiyun sq_revdeps_squash[deptid] = set() 2837*4882a593Smuzhiyun 2838*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2839*4882a593Smuzhiyun 2840*4882a593Smuzhiyun for task in sqdata.sq_harddeps: 2841*4882a593Smuzhiyun for dep in sqdata.sq_harddeps[task]: 2842*4882a593Smuzhiyun sq_revdeps_squash[dep].add(task) 2843*4882a593Smuzhiyun 2844*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2845*4882a593Smuzhiyun 2846*4882a593Smuzhiyun #for tid in sq_revdeps_squash: 2847*4882a593Smuzhiyun # data = "" 2848*4882a593Smuzhiyun # for dep in sq_revdeps_squash[tid]: 2849*4882a593Smuzhiyun # data = data + "\n %s" % dep 2850*4882a593Smuzhiyun # bb.warn("Task %s_setscene: is %s " % (tid, data)) 2851*4882a593Smuzhiyun 2852*4882a593Smuzhiyun sqdata.sq_revdeps = sq_revdeps_squash 2853*4882a593Smuzhiyun sqdata.sq_covered_tasks = sq_collated_deps 2854*4882a593Smuzhiyun 2855*4882a593Smuzhiyun # Build reverse version of revdeps to populate deps structure 2856*4882a593Smuzhiyun for tid in sqdata.sq_revdeps: 2857*4882a593Smuzhiyun sqdata.sq_deps[tid] = set() 2858*4882a593Smuzhiyun for tid in sqdata.sq_revdeps: 2859*4882a593Smuzhiyun for dep in sqdata.sq_revdeps[tid]: 2860*4882a593Smuzhiyun sqdata.sq_deps[dep].add(tid) 2861*4882a593Smuzhiyun 2862*4882a593Smuzhiyun rqdata.init_progress_reporter.next_stage() 2863*4882a593Smuzhiyun 2864*4882a593Smuzhiyun sqdata.multiconfigs = set() 2865*4882a593Smuzhiyun for tid in sqdata.sq_revdeps: 2866*4882a593Smuzhiyun sqdata.multiconfigs.add(mc_from_tid(tid)) 2867*4882a593Smuzhiyun if not sqdata.sq_revdeps[tid]: 2868*4882a593Smuzhiyun sqrq.sq_buildable.add(tid) 2869*4882a593Smuzhiyun 2870*4882a593Smuzhiyun rqdata.init_progress_reporter.finish() 2871*4882a593Smuzhiyun 2872*4882a593Smuzhiyun sqdata.noexec = set() 2873*4882a593Smuzhiyun sqdata.stamppresent = set() 2874*4882a593Smuzhiyun sqdata.valid = set() 2875*4882a593Smuzhiyun 2876*4882a593Smuzhiyun sqdata.hashes = {} 2877*4882a593Smuzhiyun sqrq.sq_deferred = {} 2878*4882a593Smuzhiyun for mc in sorted(sqdata.multiconfigs): 2879*4882a593Smuzhiyun for tid in sorted(sqdata.sq_revdeps): 2880*4882a593Smuzhiyun if mc_from_tid(tid) != mc: 2881*4882a593Smuzhiyun continue 2882*4882a593Smuzhiyun h = pending_hash_index(tid, rqdata) 2883*4882a593Smuzhiyun if h not in sqdata.hashes: 2884*4882a593Smuzhiyun sqdata.hashes[h] = tid 2885*4882a593Smuzhiyun else: 2886*4882a593Smuzhiyun sqrq.sq_deferred[tid] = sqdata.hashes[h] 2887*4882a593Smuzhiyun bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h])) 2888*4882a593Smuzhiyun 2889*4882a593Smuzhiyun update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True) 2890*4882a593Smuzhiyun 2891*4882a593Smuzhiyun # Compute a list of 'stale' sstate tasks where the current hash does not match the one 2892*4882a593Smuzhiyun # in any stamp files. Pass the list out to metadata as an event. 2893*4882a593Smuzhiyun found = {} 2894*4882a593Smuzhiyun for tid in rqdata.runq_setscene_tids: 2895*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 2896*4882a593Smuzhiyun stamps = bb.build.find_stale_stamps(taskname, rqdata.dataCaches[mc], taskfn) 2897*4882a593Smuzhiyun if stamps: 2898*4882a593Smuzhiyun if mc not in found: 2899*4882a593Smuzhiyun found[mc] = {} 2900*4882a593Smuzhiyun found[mc][tid] = stamps 2901*4882a593Smuzhiyun for mc in found: 2902*4882a593Smuzhiyun event = bb.event.StaleSetSceneTasks(found[mc]) 2903*4882a593Smuzhiyun bb.event.fire(event, cooker.databuilder.mcdata[mc]) 2904*4882a593Smuzhiyun 2905*4882a593Smuzhiyundef check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False): 2906*4882a593Smuzhiyun 2907*4882a593Smuzhiyun (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) 2908*4882a593Smuzhiyun 2909*4882a593Smuzhiyun taskdep = rqdata.dataCaches[mc].task_deps[taskfn] 2910*4882a593Smuzhiyun 2911*4882a593Smuzhiyun if 'noexec' in taskdep and taskname in taskdep['noexec']: 2912*4882a593Smuzhiyun bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn) 2913*4882a593Smuzhiyun return True, False 2914*4882a593Smuzhiyun 2915*4882a593Smuzhiyun if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache): 2916*4882a593Smuzhiyun logger.debug2('Setscene stamp current for task %s', tid) 2917*4882a593Smuzhiyun return False, True 2918*4882a593Smuzhiyun 2919*4882a593Smuzhiyun if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache): 2920*4882a593Smuzhiyun logger.debug2('Normal stamp current for task %s', tid) 2921*4882a593Smuzhiyun return False, True 2922*4882a593Smuzhiyun 2923*4882a593Smuzhiyun return False, False 2924*4882a593Smuzhiyun 2925*4882a593Smuzhiyundef update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True): 2926*4882a593Smuzhiyun 2927*4882a593Smuzhiyun tocheck = set() 2928*4882a593Smuzhiyun 2929*4882a593Smuzhiyun for tid in sorted(tids): 2930*4882a593Smuzhiyun if tid in sqdata.stamppresent: 2931*4882a593Smuzhiyun sqdata.stamppresent.remove(tid) 2932*4882a593Smuzhiyun if tid in sqdata.valid: 2933*4882a593Smuzhiyun sqdata.valid.remove(tid) 2934*4882a593Smuzhiyun if tid in sqdata.outrightfail: 2935*4882a593Smuzhiyun sqdata.outrightfail.remove(tid) 2936*4882a593Smuzhiyun 2937*4882a593Smuzhiyun noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True) 2938*4882a593Smuzhiyun 2939*4882a593Smuzhiyun if noexec: 2940*4882a593Smuzhiyun sqdata.noexec.add(tid) 2941*4882a593Smuzhiyun sqrq.sq_task_skip(tid) 2942*4882a593Smuzhiyun continue 2943*4882a593Smuzhiyun 2944*4882a593Smuzhiyun if stamppresent: 2945*4882a593Smuzhiyun sqdata.stamppresent.add(tid) 2946*4882a593Smuzhiyun sqrq.sq_task_skip(tid) 2947*4882a593Smuzhiyun continue 2948*4882a593Smuzhiyun 2949*4882a593Smuzhiyun tocheck.add(tid) 2950*4882a593Smuzhiyun 2951*4882a593Smuzhiyun sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary) 2952*4882a593Smuzhiyun 2953*4882a593Smuzhiyun for tid in tids: 2954*4882a593Smuzhiyun if tid in sqdata.stamppresent: 2955*4882a593Smuzhiyun continue 2956*4882a593Smuzhiyun if tid in sqdata.valid: 2957*4882a593Smuzhiyun continue 2958*4882a593Smuzhiyun if tid in sqdata.noexec: 2959*4882a593Smuzhiyun continue 2960*4882a593Smuzhiyun if tid in sqrq.scenequeue_covered: 2961*4882a593Smuzhiyun continue 2962*4882a593Smuzhiyun if tid in sqrq.scenequeue_notcovered: 2963*4882a593Smuzhiyun continue 2964*4882a593Smuzhiyun if tid in sqrq.sq_deferred: 2965*4882a593Smuzhiyun continue 2966*4882a593Smuzhiyun sqdata.outrightfail.add(tid) 2967*4882a593Smuzhiyun 2968*4882a593Smuzhiyunclass TaskFailure(Exception): 2969*4882a593Smuzhiyun """ 2970*4882a593Smuzhiyun Exception raised when a task in a runqueue fails 2971*4882a593Smuzhiyun """ 2972*4882a593Smuzhiyun def __init__(self, x): 2973*4882a593Smuzhiyun self.args = x 2974*4882a593Smuzhiyun 2975*4882a593Smuzhiyun 2976*4882a593Smuzhiyunclass runQueueExitWait(bb.event.Event): 2977*4882a593Smuzhiyun """ 2978*4882a593Smuzhiyun Event when waiting for task processes to exit 2979*4882a593Smuzhiyun """ 2980*4882a593Smuzhiyun 2981*4882a593Smuzhiyun def __init__(self, remain): 2982*4882a593Smuzhiyun self.remain = remain 2983*4882a593Smuzhiyun self.message = "Waiting for %s active tasks to finish" % remain 2984*4882a593Smuzhiyun bb.event.Event.__init__(self) 2985*4882a593Smuzhiyun 2986*4882a593Smuzhiyunclass runQueueEvent(bb.event.Event): 2987*4882a593Smuzhiyun """ 2988*4882a593Smuzhiyun Base runQueue event class 2989*4882a593Smuzhiyun """ 2990*4882a593Smuzhiyun def __init__(self, task, stats, rq): 2991*4882a593Smuzhiyun self.taskid = task 2992*4882a593Smuzhiyun self.taskstring = task 2993*4882a593Smuzhiyun self.taskname = taskname_from_tid(task) 2994*4882a593Smuzhiyun self.taskfile = fn_from_tid(task) 2995*4882a593Smuzhiyun self.taskhash = rq.rqdata.get_task_hash(task) 2996*4882a593Smuzhiyun self.stats = stats.copy() 2997*4882a593Smuzhiyun bb.event.Event.__init__(self) 2998*4882a593Smuzhiyun 2999*4882a593Smuzhiyunclass sceneQueueEvent(runQueueEvent): 3000*4882a593Smuzhiyun """ 3001*4882a593Smuzhiyun Base sceneQueue event class 3002*4882a593Smuzhiyun """ 3003*4882a593Smuzhiyun def __init__(self, task, stats, rq, noexec=False): 3004*4882a593Smuzhiyun runQueueEvent.__init__(self, task, stats, rq) 3005*4882a593Smuzhiyun self.taskstring = task + "_setscene" 3006*4882a593Smuzhiyun self.taskname = taskname_from_tid(task) + "_setscene" 3007*4882a593Smuzhiyun self.taskfile = fn_from_tid(task) 3008*4882a593Smuzhiyun self.taskhash = rq.rqdata.get_task_hash(task) 3009*4882a593Smuzhiyun 3010*4882a593Smuzhiyunclass runQueueTaskStarted(runQueueEvent): 3011*4882a593Smuzhiyun """ 3012*4882a593Smuzhiyun Event notifying a task was started 3013*4882a593Smuzhiyun """ 3014*4882a593Smuzhiyun def __init__(self, task, stats, rq, noexec=False): 3015*4882a593Smuzhiyun runQueueEvent.__init__(self, task, stats, rq) 3016*4882a593Smuzhiyun self.noexec = noexec 3017*4882a593Smuzhiyun 3018*4882a593Smuzhiyunclass sceneQueueTaskStarted(sceneQueueEvent): 3019*4882a593Smuzhiyun """ 3020*4882a593Smuzhiyun Event notifying a setscene task was started 3021*4882a593Smuzhiyun """ 3022*4882a593Smuzhiyun def __init__(self, task, stats, rq, noexec=False): 3023*4882a593Smuzhiyun sceneQueueEvent.__init__(self, task, stats, rq) 3024*4882a593Smuzhiyun self.noexec = noexec 3025*4882a593Smuzhiyun 3026*4882a593Smuzhiyunclass runQueueTaskFailed(runQueueEvent): 3027*4882a593Smuzhiyun """ 3028*4882a593Smuzhiyun Event notifying a task failed 3029*4882a593Smuzhiyun """ 3030*4882a593Smuzhiyun def __init__(self, task, stats, exitcode, rq, fakeroot_log=None): 3031*4882a593Smuzhiyun runQueueEvent.__init__(self, task, stats, rq) 3032*4882a593Smuzhiyun self.exitcode = exitcode 3033*4882a593Smuzhiyun self.fakeroot_log = fakeroot_log 3034*4882a593Smuzhiyun 3035*4882a593Smuzhiyun def __str__(self): 3036*4882a593Smuzhiyun if self.fakeroot_log: 3037*4882a593Smuzhiyun return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log) 3038*4882a593Smuzhiyun else: 3039*4882a593Smuzhiyun return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode) 3040*4882a593Smuzhiyun 3041*4882a593Smuzhiyunclass sceneQueueTaskFailed(sceneQueueEvent): 3042*4882a593Smuzhiyun """ 3043*4882a593Smuzhiyun Event notifying a setscene task failed 3044*4882a593Smuzhiyun """ 3045*4882a593Smuzhiyun def __init__(self, task, stats, exitcode, rq): 3046*4882a593Smuzhiyun sceneQueueEvent.__init__(self, task, stats, rq) 3047*4882a593Smuzhiyun self.exitcode = exitcode 3048*4882a593Smuzhiyun 3049*4882a593Smuzhiyun def __str__(self): 3050*4882a593Smuzhiyun return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode) 3051*4882a593Smuzhiyun 3052*4882a593Smuzhiyunclass sceneQueueComplete(sceneQueueEvent): 3053*4882a593Smuzhiyun """ 3054*4882a593Smuzhiyun Event when all the sceneQueue tasks are complete 3055*4882a593Smuzhiyun """ 3056*4882a593Smuzhiyun def __init__(self, stats, rq): 3057*4882a593Smuzhiyun self.stats = stats.copy() 3058*4882a593Smuzhiyun bb.event.Event.__init__(self) 3059*4882a593Smuzhiyun 3060*4882a593Smuzhiyunclass runQueueTaskCompleted(runQueueEvent): 3061*4882a593Smuzhiyun """ 3062*4882a593Smuzhiyun Event notifying a task completed 3063*4882a593Smuzhiyun """ 3064*4882a593Smuzhiyun 3065*4882a593Smuzhiyunclass sceneQueueTaskCompleted(sceneQueueEvent): 3066*4882a593Smuzhiyun """ 3067*4882a593Smuzhiyun Event notifying a setscene task completed 3068*4882a593Smuzhiyun """ 3069*4882a593Smuzhiyun 3070*4882a593Smuzhiyunclass runQueueTaskSkipped(runQueueEvent): 3071*4882a593Smuzhiyun """ 3072*4882a593Smuzhiyun Event notifying a task was skipped 3073*4882a593Smuzhiyun """ 3074*4882a593Smuzhiyun def __init__(self, task, stats, rq, reason): 3075*4882a593Smuzhiyun runQueueEvent.__init__(self, task, stats, rq) 3076*4882a593Smuzhiyun self.reason = reason 3077*4882a593Smuzhiyun 3078*4882a593Smuzhiyunclass taskUniHashUpdate(bb.event.Event): 3079*4882a593Smuzhiyun """ 3080*4882a593Smuzhiyun Base runQueue event class 3081*4882a593Smuzhiyun """ 3082*4882a593Smuzhiyun def __init__(self, task, unihash): 3083*4882a593Smuzhiyun self.taskid = task 3084*4882a593Smuzhiyun self.unihash = unihash 3085*4882a593Smuzhiyun bb.event.Event.__init__(self) 3086*4882a593Smuzhiyun 3087*4882a593Smuzhiyunclass runQueuePipe(): 3088*4882a593Smuzhiyun """ 3089*4882a593Smuzhiyun Abstraction for a pipe between a worker thread and the server 3090*4882a593Smuzhiyun """ 3091*4882a593Smuzhiyun def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None): 3092*4882a593Smuzhiyun self.input = pipein 3093*4882a593Smuzhiyun if pipeout: 3094*4882a593Smuzhiyun pipeout.close() 3095*4882a593Smuzhiyun bb.utils.nonblockingfd(self.input) 3096*4882a593Smuzhiyun self.queue = b"" 3097*4882a593Smuzhiyun self.d = d 3098*4882a593Smuzhiyun self.rq = rq 3099*4882a593Smuzhiyun self.rqexec = rqexec 3100*4882a593Smuzhiyun self.fakerootlogs = fakerootlogs 3101*4882a593Smuzhiyun 3102*4882a593Smuzhiyun def setrunqueueexec(self, rqexec): 3103*4882a593Smuzhiyun self.rqexec = rqexec 3104*4882a593Smuzhiyun 3105*4882a593Smuzhiyun def read(self): 3106*4882a593Smuzhiyun for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]: 3107*4882a593Smuzhiyun for worker in workers.values(): 3108*4882a593Smuzhiyun worker.process.poll() 3109*4882a593Smuzhiyun if worker.process.returncode is not None and not self.rq.teardown: 3110*4882a593Smuzhiyun bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode))) 3111*4882a593Smuzhiyun self.rq.finish_runqueue(True) 3112*4882a593Smuzhiyun 3113*4882a593Smuzhiyun start = len(self.queue) 3114*4882a593Smuzhiyun try: 3115*4882a593Smuzhiyun self.queue = self.queue + (self.input.read(102400) or b"") 3116*4882a593Smuzhiyun except (OSError, IOError) as e: 3117*4882a593Smuzhiyun if e.errno != errno.EAGAIN: 3118*4882a593Smuzhiyun raise 3119*4882a593Smuzhiyun end = len(self.queue) 3120*4882a593Smuzhiyun found = True 3121*4882a593Smuzhiyun while found and self.queue: 3122*4882a593Smuzhiyun found = False 3123*4882a593Smuzhiyun index = self.queue.find(b"</event>") 3124*4882a593Smuzhiyun while index != -1 and self.queue.startswith(b"<event>"): 3125*4882a593Smuzhiyun try: 3126*4882a593Smuzhiyun event = pickle.loads(self.queue[7:index]) 3127*4882a593Smuzhiyun except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: 3128*4882a593Smuzhiyun if isinstance(e, pickle.UnpicklingError) and "truncated" in str(e): 3129*4882a593Smuzhiyun # The pickled data could contain "</event>" so search for the next occurance 3130*4882a593Smuzhiyun # unpickling again, this should be the only way an unpickle error could occur 3131*4882a593Smuzhiyun index = self.queue.find(b"</event>", index + 1) 3132*4882a593Smuzhiyun continue 3133*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index])) 3134*4882a593Smuzhiyun bb.event.fire_from_worker(event, self.d) 3135*4882a593Smuzhiyun if isinstance(event, taskUniHashUpdate): 3136*4882a593Smuzhiyun self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash)) 3137*4882a593Smuzhiyun found = True 3138*4882a593Smuzhiyun self.queue = self.queue[index+8:] 3139*4882a593Smuzhiyun index = self.queue.find(b"</event>") 3140*4882a593Smuzhiyun index = self.queue.find(b"</exitcode>") 3141*4882a593Smuzhiyun while index != -1 and self.queue.startswith(b"<exitcode>"): 3142*4882a593Smuzhiyun try: 3143*4882a593Smuzhiyun task, status = pickle.loads(self.queue[10:index]) 3144*4882a593Smuzhiyun except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: 3145*4882a593Smuzhiyun bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index])) 3146*4882a593Smuzhiyun (_, _, _, taskfn) = split_tid_mcfn(task) 3147*4882a593Smuzhiyun fakerootlog = None 3148*4882a593Smuzhiyun if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs: 3149*4882a593Smuzhiyun fakerootlog = self.fakerootlogs[taskfn] 3150*4882a593Smuzhiyun self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog) 3151*4882a593Smuzhiyun found = True 3152*4882a593Smuzhiyun self.queue = self.queue[index+11:] 3153*4882a593Smuzhiyun index = self.queue.find(b"</exitcode>") 3154*4882a593Smuzhiyun return (end > start) 3155*4882a593Smuzhiyun 3156*4882a593Smuzhiyun def close(self): 3157*4882a593Smuzhiyun while self.read(): 3158*4882a593Smuzhiyun continue 3159*4882a593Smuzhiyun if self.queue: 3160*4882a593Smuzhiyun print("Warning, worker left partial message: %s" % self.queue) 3161*4882a593Smuzhiyun self.input.close() 3162*4882a593Smuzhiyun 3163*4882a593Smuzhiyundef get_setscene_enforce_ignore_tasks(d, targets): 3164*4882a593Smuzhiyun if d.getVar('BB_SETSCENE_ENFORCE') != '1': 3165*4882a593Smuzhiyun return None 3166*4882a593Smuzhiyun ignore_tasks = (d.getVar("BB_SETSCENE_ENFORCE_IGNORE_TASKS") or "").split() 3167*4882a593Smuzhiyun outlist = [] 3168*4882a593Smuzhiyun for item in ignore_tasks[:]: 3169*4882a593Smuzhiyun if item.startswith('%:'): 3170*4882a593Smuzhiyun for (mc, target, task, fn) in targets: 3171*4882a593Smuzhiyun outlist.append(target + ':' + item.split(':')[1]) 3172*4882a593Smuzhiyun else: 3173*4882a593Smuzhiyun outlist.append(item) 3174*4882a593Smuzhiyun return outlist 3175*4882a593Smuzhiyun 3176*4882a593Smuzhiyundef check_setscene_enforce_ignore_tasks(pn, taskname, ignore_tasks): 3177*4882a593Smuzhiyun import fnmatch 3178*4882a593Smuzhiyun if ignore_tasks is not None: 3179*4882a593Smuzhiyun item = '%s:%s' % (pn, taskname) 3180*4882a593Smuzhiyun for ignore_tasks in ignore_tasks: 3181*4882a593Smuzhiyun if fnmatch.fnmatch(item, ignore_tasks): 3182*4882a593Smuzhiyun return True 3183*4882a593Smuzhiyun return False 3184*4882a593Smuzhiyun return True 3185