Lines Matching full:self

def __init__(self, total, setscene_total):
    """Initialise run-queue statistics.

    All per-state counters start at zero; only the totals are taken
    from the caller.
    """
    # Counters for ordinary tasks.
    self.completed = 0
    self.skipped = 0
    self.failed = 0
    self.active = 0
    # Counters for setscene (cached-result) tasks.
    self.setscene_active = 0
    self.setscene_covered = 0
    self.setscene_notcovered = 0
    self.setscene_total = setscene_total
    self.total = total
def copy(self):
    """Return a duplicate of this stats object.

    A fresh instance is built with the same totals, then its attribute
    dictionary is overwritten with a copy of ours so every counter
    carries over.
    """
    obj = self.__class__(self.total, self.setscene_total)
    obj.__dict__.update(self.__dict__)
    # Bug fix: the duplicate must be handed back to the caller;
    # without this, copy() silently returned None.
    return obj
def taskFailed(self):
    """Move one task from the active count to the failed count."""
    self.active -= 1
    self.failed += 1
def taskCompleted(self):
    """Move one task from the active count to the completed count."""
    self.active -= 1
    self.completed += 1
def taskSkipped(self):
    """Count a skipped task.

    NOTE: this *increments* active; callers pair it with
    taskCompleted(), which decrements active again, netting zero.
    """
    self.active += 1
    self.skipped += 1
def taskActive(self):
    """Count one more task as currently executing."""
    self.active += 1
def updateCovered(self, covered, notcovered):
    """Record how many setscene tasks are covered vs. not covered."""
    self.setscene_covered, self.setscene_notcovered = covered, notcovered
def updateActiveSetscene(self, active):
    """Record the number of setscene tasks currently executing."""
    self.setscene_active = active
142 def __init__(self, runqueue, rqdata): argument
147 self.rq = runqueue
148 self.rqdata = rqdata
149 self.numTasks = len(self.rqdata.runtaskentries)
151 self.prio_map = [self.rqdata.runtaskentries.keys()]
153 self.buildable = set()
154 self.skip_maxthread = {}
155 self.stamps = {}
156 for tid in self.rqdata.runtaskentries:
158self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
159 if tid in self.rq.runq_buildable:
160 self.buildable.append(tid)
162 self.rev_prio_map = None
163 self.is_pressure_usable()
165 def is_pressure_usable(self): argument
171 if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure:
177 self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
178 self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
179self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
180 self.prev_pressure_time = time.time()
181 self.check_pressure = True
184 self.check_pressure = False
186 self.check_pressure = False
188 def exceeds_max_pressure(self): argument
193 if self.check_pressure:
201 …exceeds_cpu_pressure = self.rq.max_cpu_pressure and (float(curr_cpu_pressure) - float(self.prev_c…
202 …exceeds_io_pressure = self.rq.max_io_pressure and (float(curr_io_pressure) - float(self.prev_io_p…
203 …xceeds_memory_pressure = self.rq.max_memory_pressure and (float(curr_memory_pressure) - float(self
205 if now - self.prev_pressure_time > 1.0:
206 self.prev_cpu_pressure = curr_cpu_pressure
207 self.prev_io_pressure = curr_io_pressure
208 self.prev_memory_pressure = curr_memory_pressure
209 self.prev_pressure_time = now
213 def next_buildable_task(self): argument
218 self.buildable.difference_update(self.rq.runq_running)
219 buildable = set(self.buildable)
220 buildable.difference_update(self.rq.holdoff_tasks)
221 buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
228 if self.rq.stats.active and self.exceeds_max_pressure():
233 for running in self.rq.runq_running.difference(self.rq.runq_complete):
235 if rtaskname not in self.skip_maxthread:
236self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
237 if not self.skip_maxthread[rtaskname]:
247 … if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
249 stamp = self.stamps[tid]
250 if stamp not in self.rq.build_stamps.values():
253 if not self.rev_prio_map:
254 self.rev_prio_map = {}
255 for tid in self.rqdata.runtaskentries:
256 self.rev_prio_map[tid] = self.prio_map.index(tid)
262 … if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
264 prio = self.rev_prio_map[tid]
266 stamp = self.stamps[tid]
267 if stamp in self.rq.build_stamps.values():
def next(self):
    """Return the id of the task to run next, or None if no task may start."""
    return self.next_buildable_task() if self.rq.can_start_task() else None
def newbuildable(self, task):
    """Tell the scheduler that *task* has become buildable."""
    self.buildable.add(task)
def removebuildable(self, task):
    """Withdraw *task* from the buildable set (KeyError if absent)."""
    self.buildable.remove(task)
287 def describe_task(self, taskid): argument
289 if self.rev_prio_map:
290 result = result + (' pri %d' % self.rev_prio_map[taskid])
293 def dump_prio(self, comment): argument
296 '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
297 index, taskid in enumerate(self.prio_map)])))
306 def __init__(self, runqueue, rqdata): argument
310 RunQueueScheduler.__init__(self, runqueue, rqdata)
313 for tid in self.rqdata.runtaskentries:
314 weight = self.rqdata.runtaskentries[tid].weight
319 self.prio_map = []
322 self.prio_map.append(w)
324 self.prio_map.reverse()
337 def __init__(self, runqueue, rqdata): argument
338 super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)
346 for taskid in self.prio_map:
412 self.dump_prio('original priorities')
414 for index in range(task_index, self.numTasks):
415 taskid = self.prio_map[index]
418 del self.prio_map[index]
419 self.prio_map.insert(task_index, taskid)
421 self.dump_prio('completion priorities')
def __init__(self):
    """Task-graph node: dependency sets plus hash/weight bookkeeping."""
    self.depends = set()    # tids this task depends on
    self.revdeps = set()    # tids depending on this task
    self.hash = None        # task signature hash (filled in later)
    self.unihash = None     # unified hash (filled in later)
    self.task = None
    self.weight = 1         # scheduling weight, recomputed from the graph
436 def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets): argument
437 self.cooker = cooker
438 self.dataCaches = dataCaches
439 self.taskData = taskData
440 self.targets = targets
441 self.rq = rq
442 self.warn_multi_bb = False
444 self.multi_provider_allowed = (cfgData.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
445 self.setscene_ignore_tasks = get_setscene_enforce_ignore_tasks(cfgData, targets)
446 self.setscene_ignore_tasks_checked = False
447 self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
448 self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
450 self.reset()
def reset(self):
    """Drop all computed task entries so the data can be rebuilt."""
    self.runtaskentries = {}
455 def runq_depends_names(self, ids): argument
def get_task_hash(self, tid):
    """Return the signature hash recorded for task *tid*."""
    entry = self.runtaskentries[tid]
    return entry.hash
def get_task_unihash(self, tid):
    """Return the unified hash recorded for task *tid*."""
    entry = self.runtaskentries[tid]
    return entry.unihash
470 def get_user_idstring(self, tid, task_name_suffix = ""): argument
473 def get_short_user_idstring(self, task, task_name_suffix = ""): argument
475 pn = self.dataCaches[mc].pkg_fn[taskfn]
479 def circular_depchains_handler(self, tasks): argument
529 total_deps.extend(self.runtaskentries[tid].revdeps)
530 for revdep in self.runtaskentries[tid].revdeps:
541 …msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries…
572 def calculate_task_weights(self, endpoints): argument
581 numTasks = len(self.runtaskentries)
586 for tid in self.runtaskentries:
589 deps_left[tid] = len(self.runtaskentries[tid].revdeps)
598 for revdep in self.runtaskentries[tid].depends:
610 for tid in self.runtaskentries:
615 self.runtaskentries[tid].weight = weight[tid]
623 msgs = self.circular_depchains_handler(problem_tasks)
632 def prepare(self): argument
643 taskData = self.taskData
646 for mc in self.taskData:
654 self.init_progress_reporter.start()
655 self.init_progress_reporter.next_stage()
715 task_deps = self.dataCaches[mc].task_deps[taskfn]
717 self.runtaskentries[tid] = RunTaskEntry()
795 self.runtaskentries[tid].depends = depends
796 # Remove all self references
797 self.runtaskentries[tid].depends.discard(tid)
799 #self.dump_data()
801 self.init_progress_reporter.next_stage()
820 for tid in self.runtaskentries:
821 deps[tid] = set(self.runtaskentries[tid].depends)
825 for tid in self.runtaskentries:
826 for dep in self.runtaskentries[tid].depends:
830 for tid in self.runtaskentries:
857 totaldeps = set(self.runtaskentries[tid].depends)
861 if dep not in self.runtaskentries:
863 totaldeps.update(self.runtaskentries[dep].depends)
875 … if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
877 self.runtaskentries[tid].depends.add(newtid)
881 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
882 … deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
887 if newtid in self.runtaskentries:
889 self.runtaskentries[tid].depends.add(newtid)
895 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
897 self.init_progress_reporter.next_stage()
899 #self.dump_data()
920 depends = self.runtaskentries[tid].depends
926 taskdep = self.dataCaches[mc].task_deps[taskfn]
936 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
938 self.target_tids = []
939 for (mc, target, task, fn) in self.targets:
957 self.target_tids.append(tid)
973 for i in self.runtaskentries[tid].depends:
978 self.init_progress_reporter.next_stage()
985 if self.cooker.configuration.runall:
987 reduced_tasklist = set(self.runtaskentries.keys())
988 for tid in list(self.runtaskentries.keys()):
993 for task in self.cooker.configuration.runall:
999 if wanttid in self.runtaskentries:
1004 if self.cooker.configuration.force:
1008 for tid in list(self.runtaskentries.keys()):
1011 del self.runtaskentries[tid]
1013 if self.cooker.configuration.runall:
1014 if not self.runtaskentries:
1015 …he recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self
1017 self.init_progress_reporter.next_stage()
1020 if self.cooker.configuration.runonly:
1024 for task in self.cooker.configuration.runonly:
1027 … runonly_tids = [k for k in self.runtaskentries.keys() if taskname_from_tid(k) == task]
1031 if self.cooker.configuration.force:
1034 for tid in list(self.runtaskentries.keys()):
1037 del self.runtaskentries[tid]
1039 if not self.runtaskentries:
1040 …o run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self
1047 if not self.runtaskentries:
1053 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
1057 self.init_progress_reporter.next_stage()
1060 for tid in self.runtaskentries:
1061 for dep in self.runtaskentries[tid].depends:
1062 self.runtaskentries[dep].revdeps.add(tid)
1064 self.init_progress_reporter.next_stage()
1069 for tid in self.runtaskentries:
1070 revdeps = self.runtaskentries[tid].revdeps
1074 if dep in self.runtaskentries[tid].depends:
1080 self.init_progress_reporter.next_stage()
1084 self.runq_weight = self.calculate_task_weights(endpoints)
1086 self.init_progress_reporter.next_stage()
1089 for mc in self.dataCaches:
1092 for tid in self.runtaskentries:
1099 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1107 if prov in self.multi_provider_allowed:
1112 pn = self.dataCaches[mc].pkg_fn[fn]
1127 for tid in self.runtaskentries:
1131 for dep in self.runtaskentries[tid].revdeps:
1153 provides = set(self.dataCaches[mc].fn_provides[provfn])
1155 for rprovide in self.dataCaches[mc].rproviders:
1156 if provfn in self.dataCaches[mc].rproviders[rprovide]:
1158 for package in self.dataCaches[mc].packages:
1159 if provfn in self.dataCaches[mc].packages[package]:
1161 for package in self.dataCaches[mc].packages_dynamic:
1162 if provfn in self.dataCaches[mc].packages_dynamic[package]:
1180 if self.warn_multi_bb:
1185 self.init_progress_reporter.next_stage()
1186 self.init_progress_reporter.next_stage()
1189 self.runq_setscene_tids = set()
1190 if not self.cooker.configuration.nosetscene:
1191 for tid in self.runtaskentries:
1196 self.runq_setscene_tids.add(tid)
1198 self.init_progress_reporter.next_stage()
1201 if self.cooker.configuration.force:
1202 for tid in self.target_tids:
1206 if self.cooker.configuration.invalidate_stamp:
1207 for tid in self.target_tids:
1209 for st in self.cooker.configuration.invalidate_stamp.split(','):
1214 self.init_progress_reporter.next_stage()
1221 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1224 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1226 self.init_progress_reporter.next_stage()
1228 bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
1232 todeal = set(self.runtaskentries)
1235 if not (self.runtaskentries[tid].depends - dealtwith):
1238 self.prepare_task_hash(tid)
1242 #self.dump_data()
1243 return len(self.runtaskentries)
1245 def prepare_task_hash(self, tid): argument
1246 dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid))
1247 bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc)
1248self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends…
1249 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
1251 def dump_data(self): argument
1256 for tid in self.runtaskentries:
1258 self.runtaskentries[tid].weight,
1259 self.runtaskentries[tid].depends,
1260 self.runtaskentries[tid].revdeps)
def __init__(self, process, pipe):
    """Bundle a worker process together with its communication pipe."""
    self.process = process
    self.pipe = pipe
1268 def __init__(self, cooker, cfgData, dataCaches, taskData, targets): argument
1270 self.cooker = cooker
1271 self.cfgData = cfgData
1272 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
1274 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
1275 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
1277 self.state = runQueuePrepare
1285 self.dm = monitordisk.diskMonitor(cfgData)
1286 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1287 self.dm_event_handler_registered = False
1288 self.rqexe = None
1289 self.worker = {}
1290 self.fakeworker = {}
1292 def _start_worker(self, mc, fakeroot = False, rqexec = None): argument
1295 if self.cooker.configuration.profile:
1300 mcdata = self.cooker.databuilder.mcdata[mc]
1307 fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
1311 …workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlo…
1314 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1315 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1316 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1317 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
1320 "build_verbose_shell" : self.cooker.configuration.build_verbose_shell,
1321 "build_verbose_stdout" : self.cooker.configuration.build_verbose_stdout,
1323 "prhost" : self.cooker.prhost,
1324 "buildname" : self.cfgData.getVar("BUILDNAME"),
1325 "date" : self.cfgData.getVar("DATE"),
1326 "time" : self.cfgData.getVar("TIME"),
1327 "hashservaddr" : self.cooker.hashservaddr,
1328 "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"),
1331 …worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>…
1332 …worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extracon…
1338 def _teardown_worker(self, worker): argument
def start_worker(self):
    """Launch one worker process per multiconfig.

    Any workers still running from a previous round are torn down
    first, and the teardown flag is cleared before launching.
    """
    if self.worker:
        self.teardown_workers()
    self.teardown = False
    for mc in self.rqdata.dataCaches:
        self.worker[mc] = self._start_worker(mc)
def start_fakeworker(self, rqexec, mc):
    """Lazily launch the fakeroot worker for *mc* (no-op if it exists)."""
    if mc not in self.fakeworker:
        self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
def teardown_workers(self):
    """Shut down every worker and fakeworker, then forget them all."""
    self.teardown = True
    for mc in self.worker:
        self._teardown_worker(self.worker[mc])
    self.worker = {}
    for mc in self.fakeworker:
        self._teardown_worker(self.fakeworker[mc])
    self.fakeworker = {}
def read_workers(self):
    """Drain pending output from every worker and fakeworker pipe."""
    for mc in self.worker:
        self.worker[mc].pipe.read()
    for mc in self.fakeworker:
        self.fakeworker[mc].pipe.read()
def active_fds(self):
    """Return the input file descriptors of all live worker pipes.

    Fix: this fragment appended to an accumulator that was never
    initialised and never returned; restore both so callers can
    select() on the result.
    """
    fds = []
    for mc in self.worker:
        fds.append(self.worker[mc].pipe.input)
    for mc in self.fakeworker:
        fds.append(self.fakeworker[mc].pipe.input)
    return fds
1389 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None): argument
1402 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
1409 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
1422 for dep in self.rqdata.runtaskentries[tid].depends:
1425 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1426 … stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
1448 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1454 def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True): argument
1456 if self.hashvalidate:
1463 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1464 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1465 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
1467 valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary)
1471 def validate_hash(self, sq_data, d, siginfo, currentcount, summary): argument
1475 …call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summa…
1479 def _execute_runqueue(self): argument
1488 if self.state is runQueuePrepare:
1495self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.dat…
1498 if self.rqdata.prepare() == 0:
1499 self.state = runQueueComplete
1501 self.state = runQueueSceneInit
1504 if self.state is runQueueSceneInit:
1505 self.rqdata.init_progress_reporter.next_stage()
1509 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1510 self.rqdata.init_progress_reporter.next_stage()
1511 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
1513 if not self.dm_event_handler_registered:
1514 res = bb.event.register(self.dm_event_handler_name,
1515 … lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
1516 ('bb.event.HeartbeatEvent',), data=self.cfgData)
1517 self.dm_event_handler_registered = True
1519 dump = self.cooker.configuration.dump_signatures
1521 self.rqdata.init_progress_reporter.finish()
1523 invalidtasks = self.print_diffscenetasks()
1524 self.dump_signatures(dump)
1526 self.write_diffscenetasks(invalidtasks)
1527 self.state = runQueueComplete
1529 if self.state is runQueueSceneInit:
1530 self.rqdata.init_progress_reporter.next_stage()
1531 self.start_worker()
1532 self.rqdata.init_progress_reporter.next_stage()
1533 self.rqexe = RunQueueExecute(self)
1536 if not self.rqdata.runq_setscene_tids:
1538 for tid in self.rqdata.runtaskentries:
1539 if not self.rqdata.runtaskentries[tid].depends:
1540 self.rqexe.setbuildable(tid)
1541 self.rqexe.tasks_notcovered.add(tid)
1542 self.rqexe.sqdone = True
1544 self.state = runQueueRunning
1546 if self.state is runQueueRunning:
1547 retval = self.rqexe.execute()
1549 if self.state is runQueueCleanUp:
1550 retval = self.rqexe.finish()
1552 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1554 if build_done and self.dm_event_handler_registered:
1555 bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData)
1556 self.dm_event_handler_registered = False
1558 if build_done and self.rqexe:
1560 self.teardown_workers()
1561 if self.rqexe:
1562 if self.rqexe.stats.failed:
1563 …need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe…
1566 …ks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe…
1568 if self.state is runQueueFailed:
1569 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
1571 if self.state is runQueueComplete:
1578 def execute_runqueue(self): argument
1581 return self._execute_runqueue()
1588 self.teardown_workers()
1591 self.state = runQueueComplete
1596 self.teardown_workers()
1599 self.state = runQueueComplete
1602 def finish_runqueue(self, now = False): argument
1603 if not self.rqexe:
1604 self.state = runQueueComplete
1608 self.rqexe.finish_now()
1610 self.rqexe.finish()
1612 def rq_dump_sigfn(self, fn, options): argument
1613 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
1615 the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn))
1617 dataCaches = self.rqdata.dataCaches
1620 def dump_signatures(self, options): argument
1624 for tid in self.rqdata.runtaskentries:
1628 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1634 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1644 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
1648 def print_diffscenetasks(self): argument
1653 for tid in self.rqdata.runtaskentries:
1655 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
1663 valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False)
1669 if tid not in self.rqdata.runq_setscene_tids:
1671 for dep in self.rqdata.runtaskentries[tid].depends:
1673 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1674 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
1682 for tid in self.rqdata.runtaskentries:
1693 for dep in self.rqdata.runtaskentries[t].depends:
1712 def write_diffscenetasks(self, invalidtasks): argument
1717 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1731 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
1732 h = self.rqdata.runtaskentries[tid].hash
1733 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc])
1750 def __init__(self, rq): argument
1751 self.rq = rq
1752 self.cooker = rq.cooker
1753 self.cfgData = rq.cfgData
1754 self.rqdata = rq.rqdata
1756 self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
1757 self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
1758 self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU")
1759 self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO")
1760 self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY")
1762 self.sq_buildable = set()
1763 self.sq_running = set()
1764 self.sq_live = set()
1766 self.updated_taskhash_queue = []
1767 self.pending_migrations = set()
1769 self.runq_buildable = set()
1770 self.runq_running = set()
1771 self.runq_complete = set()
1772 self.runq_tasksrun = set()
1774 self.build_stamps = {}
1775 self.build_stamps2 = []
1776 self.failed_tids = []
1777 self.sq_deferred = {}
1779 self.stampcache = {}
1781 self.holdoff_tasks = set()
1782 self.holdoff_need_update = True
1783 self.sqdone = False
1785self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids))
1788 rq.worker[mc].pipe.setrunqueueexec(self)
1790 rq.fakeworker[mc].pipe.setrunqueueexec(self)
1792 if self.number_tasks <= 0:
1793 bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
1797 if self.max_cpu_pressure:
1798 self.max_cpu_pressure = float(self.max_cpu_pressure)
1799 if self.max_cpu_pressure < lower_limit:
1800 …bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_li…
1801 if self.max_cpu_pressure > upper_limit:
1802 …to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure))
1804 if self.max_io_pressure:
1805 self.max_io_pressure = float(self.max_io_pressure)
1806 if self.max_io_pressure < lower_limit:
1807 …bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limi…
1808 if self.max_io_pressure > upper_limit:
1809 … to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
1811 if self.max_memory_pressure:
1812 self.max_memory_pressure = float(self.max_memory_pressure)
1813 if self.max_memory_pressure < lower_limit:
1814 …bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lo…
1815 if self.max_memory_pressure > upper_limit:
1816 … to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
1819 self.scenequeue_covered = set()
1821 self.tasks_covered = set()
1822 self.tasks_scenequeue_done = set()
1823 self.scenequeue_notcovered = set()
1824 self.tasks_notcovered = set()
1825 self.scenequeue_notneeded = set()
1828 self.cantskip = set(self.rqdata.target_tids)
1829 self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
1830 self.cantskip.intersection_update(self.rqdata.runtaskentries)
1832 schedulers = self.get_schedulers()
1834 if self.scheduler == scheduler.name:
1835 self.sched = scheduler(self, self.rqdata)
1840 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1842 #if self.rqdata.runq_setscene_tids:
1843 self.sqdata = SQData()
1844 build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
1846 def runqueue_process_waitpid(self, task, status, fakerootlog=None): argument
1848 # self.build_stamps[pid] may not exist when use shared work directory.
1849 if task in self.build_stamps:
1850 self.build_stamps2.remove(self.build_stamps[task])
1851 del self.build_stamps[task]
1853 if task in self.sq_live:
1855 self.sq_task_fail(task, status)
1857 self.sq_task_complete(task)
1858 self.sq_live.remove(task)
1859 self.stats.updateActiveSetscene(len(self.sq_live))
1862 self.task_fail(task, status, fakerootlog=fakerootlog)
1864 self.task_complete(task)
1867 def finish_now(self): argument
1868 for mc in self.rq.worker:
1870 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1871 self.rq.worker[mc].process.stdin.flush()
1875 for mc in self.rq.fakeworker:
1877 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1878 self.rq.fakeworker[mc].process.stdin.flush()
1883 if self.failed_tids:
1884 self.rq.state = runQueueFailed
1887 self.rq.state = runQueueComplete
1890 def finish(self): argument
1891 self.rq.state = runQueueCleanUp
1893 active = self.stats.active + len(self.sq_live)
1895 bb.event.fire(runQueueExitWait(active), self.cfgData)
1896 self.rq.read_workers()
1897 return self.rq.active_fds()
1899 if self.failed_tids:
1900 self.rq.state = runQueueFailed
1903 self.rq.state = runQueueComplete
1907 def check_dependencies(self, task, taskdeps): argument
1908 if not self.rq.depvalidate:
1918 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
1920 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
1921 …s = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.co…
1925 def can_start_task(self): argument
1926 active = self.stats.active + len(self.sq_live)
1927 can_start = active < self.number_tasks
1930 def get_schedulers(self): argument
1935 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
def setbuildable(self, task):
    """Mark *task* as buildable and notify the scheduler."""
    self.runq_buildable.add(task)
    self.sched.newbuildable(task)
1956 def task_completeoutright(self, task): argument
1962 self.runq_complete.add(task)
1963 for revdep in self.rqdata.runtaskentries[task].revdeps:
1964 if revdep in self.runq_running:
1966 if revdep in self.runq_buildable:
1969 for dep in self.rqdata.runtaskentries[revdep].depends:
1970 if dep not in self.runq_complete:
1974 self.setbuildable(revdep)
1977 for t in self.sq_deferred.copy():
1978 if self.sq_deferred[t] == task:
1980 del self.sq_deferred[t]
1981 …update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self,…
def task_complete(self, task):
    """Handle successful completion of *task*.

    Updates the stats before firing the completion event (the event
    carries the stats object), then releases dependents and records
    that the task actually ran.
    """
    self.stats.taskCompleted()
    bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
    self.task_completeoutright(task)
    self.runq_tasksrun.add(task)
1989 def task_fail(self, task, exitcode, fakerootlog=None): argument
1994 self.stats.taskFailed()
1995 self.failed_tids.append(task)
2012 …vent.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_l…
2014 if self.rqdata.taskData[''].halt:
2015 self.rq.state = runQueueCleanUp
def task_skip(self, task, reason):
    """Skip *task* for *reason* without executing it.

    The task is marked running/buildable/complete so the dependency
    machinery advances as if it had run.
    """
    self.runq_running.add(task)
    self.setbuildable(task)
    bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
    self.task_completeoutright(task)
    # taskSkipped bumps the active count; taskCompleted drops it
    # again, so the pair nets out to zero active tasks.
    self.stats.taskSkipped()
    self.stats.taskCompleted()
2025 def summarise_scenequeue_errors(self): argument
2027 if not self.sqdone:
2028 logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
2029 completeevent = sceneQueueComplete(self.stats, self.rq)
2030 bb.event.fire(completeevent, self.cfgData)
2031 if self.sq_deferred:
2032 logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
2034 if self.updated_taskhash_queue:
2035 …Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_qu…
2037 if self.holdoff_tasks:
2038 logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
2041 for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered):
2045 for tid in self.rqdata.runq_setscene_tids:
2046 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2049 if tid not in self.sq_buildable:
2052 if tid not in self.sq_running:
2056 for x in self.rqdata.runtaskentries:
2057 if x not in self.tasks_covered and x not in self.tasks_notcovered:
2060 if x not in self.tasks_scenequeue_done:
2063 if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable:
2069 def execute(self): argument
2074 self.rq.read_workers()
2075 if self.updated_taskhash_queue or self.pending_migrations:
2076 self.process_possible_migrations()
2078 if not hasattr(self, "sorted_setscene_tids"):
2080 self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)
2083 if not self.sqdone and self.can_start_task():
2085 for nexttask in self.sorted_setscene_tids:
2086 …if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nextta…
2087self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and self.sqdata.sq_revdeps[nexttask].…
2088 if nexttask not in self.rqdata.target_tids:
2090 self.sq_task_skip(nexttask)
2091 self.scenequeue_notneeded.add(nexttask)
2092 if nexttask in self.sq_deferred:
2093 del self.sq_deferred[nexttask]
2096 for t in self.sqdata.sq_covered_tasks[nexttask]:
2097 if t in self.runq_running and t not in self.runq_complete:
2099 if nexttask in self.sq_deferred:
2100 if self.sq_deferred[nexttask] not in self.runq_complete:
2103 del self.sq_deferred[nexttask]
2104 … valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
2107 self.sq_task_failoutright(nexttask)
2109 if nexttask in self.sqdata.outrightfail:
2111 self.sq_task_failoutright(nexttask)
2113 if nexttask in self.sqdata.unskippable:
2120 … if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
2122 self.sq_task_failoutright(task)
2125 if self.cooker.configuration.force:
2126 if task in self.rqdata.target_tids:
2127 self.sq_task_failoutright(task)
2130 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2132 self.sq_task_skip(task)
2135 if self.cooker.configuration.skipsetscene:
2137 self.sq_task_failoutright(task)
2140 startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
2141 bb.event.fire(startevent, self.cfgData)
2143 taskdepdata = self.sq_build_taskdepdata(task)
2145 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2146 taskhash = self.rqdata.get_task_hash(task)
2147 unihash = self.rqdata.get_task_unihash(task)
2148 …if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry…
2149 if not mc in self.rq.fakeworker:
2150 self.rq.start_fakeworker(self, mc)
2151self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, ta…
2152 self.rq.fakeworker[mc].process.stdin.flush()
2154self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskha…
2155 self.rq.worker[mc].process.stdin.flush()
2157self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra…
2158 self.build_stamps2.append(self.build_stamps[task])
2159 self.sq_running.add(task)
2160 self.sq_live.add(task)
2161 self.stats.updateActiveSetscene(len(self.sq_live))
2162 if self.can_start_task():
2165 self.update_holdofftasks()
2167 …if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_que…
2170 err = self.summarise_scenequeue_errors()
2172 self.rq.state = runQueueFailed
2175 if self.cooker.configuration.setsceneonly:
2176 self.rq.state = runQueueComplete
2178 self.sqdone = True
2180 if self.stats.total == 0:
2182 self.rq.state = runQueueComplete
2185 if self.cooker.configuration.setsceneonly:
2188 task = self.sched.next()
2192 if self.rqdata.setscene_ignore_tasks is not None:
2193 if self.check_setscene_ignore_tasks(task):
2194 self.task_fail(task, "setscene ignore_tasks")
2197 if task in self.tasks_covered:
2199 self.task_skip(task, "covered")
2202 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2205 self.task_skip(task, "existing")
2206 self.runq_tasksrun.add(task)
2209 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2211 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2213 bb.event.fire(startevent, self.cfgData)
2214 self.runq_running.add(task)
2215 self.stats.taskActive()
2216 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
2217 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
2218 self.task_complete(task)
2221 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2222 bb.event.fire(startevent, self.cfgData)
2224 taskdepdata = self.build_taskdepdata(task)
2226 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2227 taskhash = self.rqdata.get_task_hash(task)
2228 unihash = self.rqdata.get_task_unihash(task)
2229 …in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.…
2230 if not mc in self.rq.fakeworker:
2232 self.rq.start_fakeworker(self, mc)
2235 self.rq.state = runQueueFailed
2236 self.stats.taskFailed()
2238self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, ta…
2239 self.rq.fakeworker[mc].process.stdin.flush()
2241self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskha…
2242 self.rq.worker[mc].process.stdin.flush()
2244self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra…
2245 self.build_stamps2.append(self.build_stamps[task])
2246 self.runq_running.add(task)
2247 self.stats.taskActive()
2248 if self.can_start_task():
2251 if self.stats.active > 0 or self.sq_live:
2252 self.rq.read_workers()
2253 return self.rq.active_fds()
2256 if self.sq_deferred:
2257 deferred_tid = list(self.sq_deferred.keys())[0]
2258 blocking_tid = self.sq_deferred.pop(deferred_tid)
2262 if self.failed_tids:
2263 self.rq.state = runQueueFailed
2267 err = self.summarise_scenequeue_errors()
2268 for task in self.rqdata.runtaskentries:
2269 if task not in self.runq_buildable:
2272 elif task not in self.runq_running:
2275 elif task not in self.runq_complete:
2280 self.rq.state = runQueueFailed
2282 self.rq.state = runQueueComplete
2286 def filtermcdeps(self, task, mc, deps): argument
2297 def build_taskdepdata(self, task): argument
2300 next = self.rqdata.runtaskentries[task].depends.copy()
2302 next = self.filtermcdeps(task, mc, next)
2307 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2308 deps = self.rqdata.runtaskentries[revdep].depends
2309 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2310 taskhash = self.rqdata.runtaskentries[revdep].hash
2311 unihash = self.rqdata.runtaskentries[revdep].unihash
2312 deps = self.filtermcdeps(task, mc, deps)
2322 def update_holdofftasks(self): argument
2324 if not self.holdoff_need_update:
2327 notcovered = set(self.scenequeue_notcovered)
2328 notcovered |= self.cantskip
2329 for tid in self.scenequeue_notcovered:
2330 notcovered |= self.sqdata.sq_covered_tasks[tid]
2331 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2332 notcovered.intersection_update(self.tasks_scenequeue_done)
2334 covered = set(self.scenequeue_covered)
2335 for tid in self.scenequeue_covered:
2336 covered |= self.sqdata.sq_covered_tasks[tid]
2338 covered.intersection_update(self.tasks_scenequeue_done)
2341 if not self.rqdata.runtaskentries[tid].depends:
2342 self.setbuildable(tid)
2343 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2344 self.setbuildable(tid)
2346 self.tasks_covered = covered
2347 self.tasks_notcovered = notcovered
2349 self.holdoff_tasks = set()
2351 for tid in self.rqdata.runq_setscene_tids:
2352 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2353 self.holdoff_tasks.add(tid)
2355 for tid in self.holdoff_tasks.copy():
2356 for dep in self.sqdata.sq_covered_tasks[tid]:
2357 if dep not in self.runq_complete:
2358 self.holdoff_tasks.add(dep)
2360 self.holdoff_need_update = False
2362 def process_possible_migrations(self): argument
2366 for tid, unihash in self.updated_taskhash_queue.copy():
2367 if tid in self.runq_running and tid not in self.runq_complete:
2370 self.updated_taskhash_queue.remove((tid, unihash))
2372 if unihash != self.rqdata.runtaskentries[tid].unihash:
2375 for deftid in self.sq_deferred:
2376 if self.sq_deferred[deftid] == tid:
2380 self.rqdata.runtaskentries[hashtid].unihash = unihash
2391 next |= self.rqdata.runtaskentries[p].revdeps
2397 next |= self.rqdata.runtaskentries[ntid].revdeps
2403 if not self.rqdata.runtaskentries[p].depends:
2405 elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
2413 …if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoin…
2415 orighash = self.rqdata.runtaskentries[tid].hash
2416 dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid))
2417 … newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc)
2418 origuni = self.rqdata.runtaskentries[tid].unihash
2425 elif tid in self.scenequeue_covered or tid in self.sq_live:
2427 … bb.parse.siggen.report_unihash_equiv(tid, newhash, origuni, newuni, self.rqdata.dataCaches)
2433 self.rqdata.runtaskentries[tid].hash = newhash
2434 self.rqdata.runtaskentries[tid].unihash = newuni
2437 next |= self.rqdata.runtaskentries[tid].revdeps
2442 for mc in self.rq.worker:
2443self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskh…
2444 for mc in self.rq.fakeworker:
2445self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_t…
2450 if tid not in self.rqdata.runq_setscene_tids:
2452 if tid not in self.pending_migrations:
2453 self.pending_migrations.add(tid)
2456 for tid in self.pending_migrations.copy():
2457 if tid in self.runq_running or tid in self.sq_live:
2459 self.pending_migrations.remove(tid)
2464 for dep in self.sqdata.sq_covered_tasks[tid]:
2465 if dep in self.runq_running and dep not in self.runq_complete:
2472 self.pending_migrations.remove(tid)
2475 if tid in self.tasks_scenequeue_done:
2476 self.tasks_scenequeue_done.remove(tid)
2477 for dep in self.sqdata.sq_covered_tasks[tid]:
2478 if dep in self.runq_complete and dep not in self.runq_tasksrun:
2480 self.failed_tids.append(tid)
2481 self.rq.state = runQueueCleanUp
2484 if dep not in self.runq_complete:
2485 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2486 self.tasks_scenequeue_done.remove(dep)
2488 if tid in self.sq_buildable:
2489 self.sq_buildable.remove(tid)
2490 if tid in self.sq_running:
2491 self.sq_running.remove(tid)
2492 if tid in self.sqdata.outrightfail:
2493 self.sqdata.outrightfail.remove(tid)
2494 if tid in self.scenequeue_notcovered:
2495 self.scenequeue_notcovered.remove(tid)
2496 if tid in self.scenequeue_covered:
2497 self.scenequeue_covered.remove(tid)
2498 if tid in self.scenequeue_notneeded:
2499 self.scenequeue_notneeded.remove(tid)
2502self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], t…
2504 if tid in self.stampcache:
2505 del self.stampcache[tid]
2507 if tid in self.build_stamps:
2508 del self.build_stamps[tid]
2515 for t in self.sqdata.sq_harddeps:
2516 if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
2519 …if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequ…
2520 if tid not in self.sq_buildable:
2521 self.sq_buildable.add(tid)
2522 if not self.sqdata.sq_revdeps[tid]:
2523 self.sq_buildable.add(tid)
2525 update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid))
2528 self.sqdone = False
2529 for mc in sorted(self.sqdata.multiconfigs):
2533 h = pending_hash_index(tid, self.rqdata)
2534 if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
2535 self.sq_deferred[tid] = self.sqdata.hashes[h]
2536 bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
2537 …queue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.sta…
2540 if tid in self.sqdata.valid and not origvalid:
2543 self.sq_task_failoutright(tid)
2546 self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
2547 self.holdoff_need_update = True
2549 def scenequeue_updatecounters(self, task, fail=False): argument
2551 for dep in sorted(self.sqdata.sq_deps[task]):
2552 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
2553 if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered:
2556 … noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache)
2560 self.sq_task_failoutright(dep)
2562 … if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2563 if dep not in self.sq_buildable:
2564 self.sq_buildable.add(dep)
2570 self.tasks_scenequeue_done.add(t)
2573 for dep in self.rqdata.runtaskentries[t].depends:
2574 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2576 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2580 self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
2581 self.holdoff_need_update = True
2583 def sq_task_completeoutright(self, task): argument
2591 self.scenequeue_covered.add(task)
2592 self.scenequeue_updatecounters(task)
2594 def sq_check_taskfail(self, task): argument
2595 if self.rqdata.setscene_ignore_tasks is not None:
2598 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2599 … if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks):
2601 self.rq.state = runQueueCleanUp
2603 def sq_task_complete(self, task): argument
2604 bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
2605 self.sq_task_completeoutright(task)
2607 def sq_task_fail(self, task, result): argument
2608 bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
2609 self.scenequeue_notcovered.add(task)
2610 self.scenequeue_updatecounters(task, True)
2611 self.sq_check_taskfail(task)
2613 def sq_task_failoutright(self, task): argument
2614 self.sq_running.add(task)
2615 self.sq_buildable.add(task)
2616 self.scenequeue_notcovered.add(task)
2617 self.scenequeue_updatecounters(task, True)
2619 def sq_task_skip(self, task): argument
2620 self.sq_running.add(task)
2621 self.sq_buildable.add(task)
2622 self.sq_task_completeoutright(task)
2624 def sq_build_taskdepdata(self, task): argument
2629 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2631 if depname not in self.rqdata.taskData[mc].build_targets:
2634 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2648 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2650 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2651 taskhash = self.rqdata.runtaskentries[revdep].hash
2652 unihash = self.rqdata.runtaskentries[revdep].unihash
2662 def check_setscene_ignore_tasks(self, tid): argument
2666 if tid in self.tasks_covered:
2669 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2672 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2676 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2677 if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks):
2678 if tid in self.rqdata.runq_setscene_tids:
2682 for t in self.scenequeue_notcovered:
2683 …msg.append("\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.…
2684 …ng setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcover…
2690 def __init__(self): argument
2692 self.sq_deps = {}
2694 self.sq_revdeps = {}
2696 self.sq_harddeps = {}
2698 self.stamps = {}
2700 self.unskippable = set()
2702 self.outrightfail = set()
2704 self.sq_covered_tasks = {}
2972 def __init__(self, x): argument
2973 self.args = x
2981 def __init__(self, remain): argument
2982 self.remain = remain
2983 self.message = "Waiting for %s active tasks to finish" % remain
2984 bb.event.Event.__init__(self)
2990 def __init__(self, task, stats, rq): argument
2991 self.taskid = task
2992 self.taskstring = task
2993 self.taskname = taskname_from_tid(task)
2994 self.taskfile = fn_from_tid(task)
2995 self.taskhash = rq.rqdata.get_task_hash(task)
2996 self.stats = stats.copy()
2997 bb.event.Event.__init__(self)
3003 def __init__(self, task, stats, rq, noexec=False): argument
3004 runQueueEvent.__init__(self, task, stats, rq)
3005 self.taskstring = task + "_setscene"
3006 self.taskname = taskname_from_tid(task) + "_setscene"
3007 self.taskfile = fn_from_tid(task)
3008 self.taskhash = rq.rqdata.get_task_hash(task)
3014 def __init__(self, task, stats, rq, noexec=False): argument
3015 runQueueEvent.__init__(self, task, stats, rq)
3016 self.noexec = noexec
3022 def __init__(self, task, stats, rq, noexec=False): argument
3023 sceneQueueEvent.__init__(self, task, stats, rq)
3024 self.noexec = noexec
3030 def __init__(self, task, stats, exitcode, rq, fakeroot_log=None): argument
3031 runQueueEvent.__init__(self, task, stats, rq)
3032 self.exitcode = exitcode
3033 self.fakeroot_log = fakeroot_log
3035 def __str__(self): argument
3036 if self.fakeroot_log:
3037 …ask (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fak…
3039 return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
3045 def __init__(self, task, stats, exitcode, rq): argument
3046 sceneQueueEvent.__init__(self, task, stats, rq)
3047 self.exitcode = exitcode
3049 def __str__(self): argument
3050 …(%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
3056 def __init__(self, stats, rq): argument
3057 self.stats = stats.copy()
3058 bb.event.Event.__init__(self)
3074 def __init__(self, task, stats, rq, reason): argument
3075 runQueueEvent.__init__(self, task, stats, rq)
3076 self.reason = reason
3082 def __init__(self, task, unihash): argument
3083 self.taskid = task
3084 self.unihash = unihash
3085 bb.event.Event.__init__(self)
3091 def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None): argument
3092 self.input = pipein
3095 bb.utils.nonblockingfd(self.input)
3096 self.queue = b""
3097 self.d = d
3098 self.rq = rq
3099 self.rqexec = rqexec
3100 self.fakerootlogs = fakerootlogs
3102 def setrunqueueexec(self, rqexec): argument
3103 self.rqexec = rqexec
3105 def read(self): argument
3106 for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
3109 if worker.process.returncode is not None and not self.rq.teardown:
3111 self.rq.finish_runqueue(True)
3113 start = len(self.queue)
3115 self.queue = self.queue + (self.input.read(102400) or b"")
3119 end = len(self.queue)
3121 while found and self.queue:
3123 index = self.queue.find(b"</event>")
3124 while index != -1 and self.queue.startswith(b"<event>"):
3126 event = pickle.loads(self.queue[7:index])
3131 index = self.queue.find(b"</event>", index + 1)
3133 … bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
3134 bb.event.fire_from_worker(event, self.d)
3136 self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
3138 self.queue = self.queue[index+8:]
3139 index = self.queue.find(b"</event>")
3140 index = self.queue.find(b"</exitcode>")
3141 while index != -1 and self.queue.startswith(b"<exitcode>"):
3143 task, status = pickle.loads(self.queue[10:index])
3145 … bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
3148 if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs:
3149 fakerootlog = self.fakerootlogs[taskfn]
3150 self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog)
3152 self.queue = self.queue[index+11:]
3153 index = self.queue.find(b"</exitcode>")
3156 def close(self): argument
3157 while self.read():
3159 if self.queue:
3160 print("Warning, worker left partial message: %s" % self.queue)
3161 self.input.close()