xref: /OK3568_Linux_fs/yocto/poky/meta/classes/sstate.bbclass (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1SSTATE_VERSION = "10"
2
3SSTATE_ZSTD_CLEVEL ??= "8"
4
5SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
6SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
7
8def generate_sstatefn(spec, hash, taskname, siginfo, d):
9    if taskname is None:
10       return ""
11    extension = ".tar.zst"
12    # 8 chars reserved for siginfo
13    limit = 254 - 8
14    if siginfo:
15        limit = 254
16        extension = ".tar.zst.siginfo"
17    if not hash:
18        hash = "INVALID"
19    fn = spec + hash + "_" + taskname + extension
20    # If the filename is too long, attempt to reduce it
21    if len(fn) > limit:
22        components = spec.split(":")
23        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
24        # 7 is for the separators
25        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
26        components[2] = components[2][:avail]
27        components[3] = components[3][:avail]
28        components[4] = components[4][:avail]
29        spec = ":".join(components)
30        fn = spec + hash + "_" + taskname + extension
31        if len(fn) > limit:
32            bb.fatal("Unable to reduce sstate name to less than 255 characters")
33    return hash[:2] + "/" + hash[2:4] + "/" + fn
34
35SSTATE_PKGARCH    = "${PACKAGE_ARCH}"
36SSTATE_PKGSPEC    = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
37SSTATE_SWSPEC     = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
38SSTATE_PKGNAME    = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
39SSTATE_PKG        = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
40SSTATE_EXTRAPATH   = ""
41SSTATE_EXTRAPATHWILDCARD = ""
42SSTATE_PATHSPEC   = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
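
# Illustrative sketch (not part of the class logic): how generate_sstatefn() and the
# spec variables above combine into an on-disk object path. The recipe name, version,
# architecture and hash values below are hypothetical.
def sstate_example_object_path(d):
    # With SSTATE_PKGSPEC expanding to something like
    # "sstate:zlib:core2-64-poky-linux:1.3:r0:core2-64:10:" and a unihash starting
    # with "3f2a", the do_populate_sysroot object would live under
    # ${SSTATE_DIR}/3f/2a/sstate:zlib:...:10:3f2a..._populate_sysroot.tar.zst
    spec = "sstate:zlib:core2-64-poky-linux:1.3:r0:core2-64:10:"
    unihash = "3f2a" + "0" * 60
    return generate_sstatefn(spec, unihash, "populate_sysroot", False, d)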
43
44# Explicitly make PV depend on the evaluated value of the PV variable
45PV[vardepvalue] = "${PV}"
46
47# We don't want the sstate to depend on things like the distro string
48# of the system; we let the sstate paths take care of this.
49SSTATE_EXTRAPATH[vardepvalue] = ""
50SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
51
52# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
53SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
54# Avoid docbook/sgml catalog warnings for now
55SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
56# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
57SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
58SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
59# target-sdk-provides-dummy files overlap because allarch is disabled when multilib is used
60SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
61# Archive the sources for many architectures in one deploy folder
62SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
63# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
64SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
65SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
66SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
67SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
68
69SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
70SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
71SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
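
# With the default SSTATE_SCAN_FILES above, SSTATE_SCAN_CMD expands to roughly:
#   find ${SSTATE_BUILDDIR} \( -name "*.la" -o -name "*-config" -o -name "*_config" \
#        -o -name "postinst-*" \) -type f
# i.e. only libtool archives, *-config/*_config scripts and postinst scripts are
# scanned for hardcoded staging paths by default.
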
72SSTATE_HASHEQUIV_FILEMAP ?= " \
73    populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
74    populate_sysroot:*/postinst-useradd-*:${COREBASE} \
75    populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
76    populate_sysroot:*/crossscripts/*:${TMPDIR} \
77    populate_sysroot:*/crossscripts/*:${COREBASE} \
78    "
79
80BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
81
82SSTATE_ARCHS = " \
83    ${BUILD_ARCH} \
84    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
85    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
86    ${SDK_ARCH}_${SDK_OS} \
87    ${SDK_ARCH}_${PACKAGE_ARCH} \
88    allarch \
89    ${PACKAGE_ARCH} \
90    ${PACKAGE_EXTRA_ARCHS} \
91    ${MACHINE_ARCH}"
92SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
93
94SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
95
96SSTATECREATEFUNCS += "sstate_hardcode_path"
97SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
98SSTATEPOSTCREATEFUNCS = ""
99SSTATEPREINSTFUNCS = ""
100SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
101SSTATEPOSTINSTFUNCS = ""
102EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
103
104# Check whether sstate exists for tasks that support sstate and are in the
105# locked signatures file.
106SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
107
108# Check whether the task's computed hash matches the task's hash in the
109# locked signatures file.
110SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
111
112# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
113# not sign)
114SSTATE_SIG_KEY ?= ""
115SSTATE_SIG_PASSPHRASE ?= ""
116# Whether to verify the GnuPG signatures when extracting sstate archives
117SSTATE_VERIFY_SIG ?= "0"
118# List of signatures to consider valid.
119SSTATE_VALID_SIGS ??= ""
120SSTATE_VALID_SIGS[vardepvalue] = ""
121
122SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
123SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
124    the output hash for a task, which in turn is used to determine equivalency. \
125    "
126
127SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
128SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
129    hash equivalency server, such as PN, PV, taskname, etc. This information \
130    is very useful for developers looking at task data, but may leak sensitive \
131    data if the equivalence server is public. \
132    "
133
134python () {
135    if bb.data.inherits_class('native', d):
136        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
137    elif bb.data.inherits_class('crosssdk', d):
138        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
139    elif bb.data.inherits_class('cross', d):
140        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
141    elif bb.data.inherits_class('nativesdk', d):
142        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
143    elif bb.data.inherits_class('cross-canadian', d):
144        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
145    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
146        d.setVar('SSTATE_PKGARCH', "allarch")
147    else:
148        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
149
150    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
151        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
152        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
153        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
154
155    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
156    d.setVar('SSTATETASKS', " ".join(unique_tasks))
157    for task in unique_tasks:
158        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
159        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
160        d.setVarFlag(task, 'network', '1')
161        d.setVarFlag(task + "_setscene", 'network', '1')
162}
163
164def sstate_init(task, d):
165    ss = {}
166    ss['task'] = task
167    ss['dirs'] = []
168    ss['plaindirs'] = []
169    ss['lockfiles'] = []
170    ss['lockfiles-shared'] = []
171    return ss
172
173def sstate_state_fromvars(d, task = None):
174    if task is None:
175        task = d.getVar('BB_CURRENTTASK')
176        if not task:
177            bb.fatal("sstate code running without task context?!")
178        task = task.replace("_setscene", "")
179
180    if task.startswith("do_"):
181        task = task[3:]
182    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
183    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
184    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
185    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
186    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
187    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
188    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
189    if not task or len(inputs) != len(outputs):
190        bb.fatal("sstate variables not setup correctly?!")
191
192    if task == "populate_lic":
193        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
194        d.setVar("SSTATE_EXTRAPATH", "")
195        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
196
197    ss = sstate_init(task, d)
198    for i in range(len(inputs)):
199        sstate_add(ss, inputs[i], outputs[i], d)
200    ss['lockfiles'] = lockfiles
201    ss['lockfiles-shared'] = lockfilesshared
202    ss['plaindirs'] = plaindirs
203    ss['interceptfuncs'] = interceptfuncs
204    ss['fixmedir'] = fixmedir
205    return ss
206
207def sstate_add(ss, source, dest, d):
208    if not source.endswith("/"):
209         source = source + "/"
210    if not dest.endswith("/"):
211         dest = dest + "/"
212    source = os.path.normpath(source)
213    dest = os.path.normpath(dest)
214    srcbase = os.path.basename(source)
215    ss['dirs'].append([srcbase, source, dest])
216    return ss
217
218def sstate_install(ss, d):
219    import oe.path
220    import oe.sstatesig
221    import subprocess
222
223    sharedfiles = []
224    shareddirs = []
225    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
226
227    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
228
229    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
230
231    if os.access(manifest, os.R_OK):
232        bb.fatal("Package already staged (%s)?!" % manifest)
233
234    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
235
236    locks = []
237    for lock in ss['lockfiles-shared']:
238        locks.append(bb.utils.lockfile(lock, True))
239    for lock in ss['lockfiles']:
240        locks.append(bb.utils.lockfile(lock))
241
242    for state in ss['dirs']:
243        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
244        for walkroot, dirs, files in os.walk(state[1]):
245            for file in files:
246                srcpath = os.path.join(walkroot, file)
247                dstpath = srcpath.replace(state[1], state[2])
248                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
249                sharedfiles.append(dstpath)
250            for dir in dirs:
251                srcdir = os.path.join(walkroot, dir)
252                dstdir = srcdir.replace(state[1], state[2])
253                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
254                if os.path.islink(srcdir):
255                    sharedfiles.append(dstdir)
256                    continue
257                if not dstdir.endswith("/"):
258                    dstdir = dstdir + "/"
259                shareddirs.append(dstdir)
260
261    # Check the file list for conflicts against files which already exist
262    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
263    match = []
264    for f in sharedfiles:
265        if os.path.exists(f) and not os.path.islink(f):
266            f = os.path.normpath(f)
267            realmatch = True
268            for w in overlap_allowed:
269                w = os.path.normpath(w)
270                if f.startswith(w):
271                    realmatch = False
272                    break
273            if realmatch:
274                match.append(f)
275                sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
276                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
277                if search_output:
278                    match.append("  (matched in %s)" % search_output.decode('utf-8').rstrip())
279                else:
280                    match.append("  (not matched to any task)")
281    if match:
282        bb.error("The recipe %s is trying to install files into a shared " \
283          "area when those files already exist. Those files and their manifest " \
284          "location are:\n  %s\nPlease verify which recipe should provide the " \
285          "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
286          "break things - if not now, possibly in the future (we've seen builds fail " \
287          "several months later). If the system knew how to recover from this " \
288          "automatically it would, however there are several different scenarios " \
289          "which can result in this and we don't know which one this is. It may be " \
290          "you have switched providers of something like virtual/kernel (e.g. from " \
291          "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
292          "clean task for both recipes and it will resolve this error. It may be " \
293          "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
294          "those recipes should again resolve this error, however switching " \
295          "DISTRO_FEATURES on an existing build directory is not supported - you " \
296          "should really clean out tmp and rebuild (reusing sstate should be safe). " \
297          "It could be the overlapping files detected are harmless in which case " \
298          "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
299          "also be your build is including two different conflicting versions of " \
300          "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
301          "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
302          "sharing the error and filelist above." % \
303          (d.getVar('PN'), "\n  ".join(match)))
304        bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
305
306    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
307        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
308        sharedfiles.append(ss['fixmedir'] + "/fixmepath")
309
310    # Write out the manifest
311    f = open(manifest, "w")
312    for file in sharedfiles:
313        f.write(file + "\n")
314
315    # We want to ensure that directories appear at the end of the manifest
316    # so that when we test to see if they should be deleted any contents
317    # added by the task will have been removed first.
318    dirs = sorted(shareddirs, key=len)
319    # Must remove children first, which will have a longer path than the parent
320    for di in reversed(dirs):
321        f.write(di + "\n")
322    f.close()
323
324    # Append to the list of manifests for this PACKAGE_ARCH
325
326    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
327    l = bb.utils.lockfile(i + ".lock")
328    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
329    manifests = []
330    if os.path.exists(i):
331        with open(i, "r") as f:
332            manifests = f.readlines()
333    # We append new entries, we don't remove older entries which may have the same
334    # manifest name but different versions from stamp/workdir. See below.
335    if filedata not in manifests:
336        with open(i, "a+") as f:
337            f.write(filedata)
338    bb.utils.unlockfile(l)
339
340    # Run the actual file install
341    for state in ss['dirs']:
342        if os.path.exists(state[1]):
343            oe.path.copyhardlinktree(state[1], state[2])
344
345    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
346        # All hooks should run in the SSTATE_INSTDIR
347        bb.build.exec_func(postinst, d, (sstateinst,))
348
349    for lock in locks:
350        bb.utils.unlockfile(lock)
351
352sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
353sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
354
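# The directory entries appended to the manifest above are written longest path
# first, so a child directory always appears before (and is removed before) its
# parent when the manifest is replayed by sstate_clean_manifest(). A minimal
# illustration of that ordering with hypothetical paths:
def sstate_example_manifest_dir_order(shareddirs):
    # e.g. ["/a/", "/a/b/c/", "/a/b/"] -> removal order ["/a/b/c/", "/a/b/", "/a/"]
    return list(reversed(sorted(shareddirs, key=len)))
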
355def sstate_installpkg(ss, d):
356    from oe.gpg_sign import get_signer
357
358    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
359    d.setVar("SSTATE_CURRTASK", ss['task'])
360    sstatefetch = d.getVar('SSTATE_PKGNAME')
361    sstatepkg = d.getVar('SSTATE_PKG')
362
363    if not os.path.exists(sstatepkg):
364        pstaging_fetch(sstatefetch, d)
365
366    if not os.path.isfile(sstatepkg):
367        bb.note("Sstate package %s does not exist" % sstatepkg)
368        return False
369
370    sstate_clean(ss, d)
371
372    d.setVar('SSTATE_INSTDIR', sstateinst)
373
374    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
375        if not os.path.isfile(sstatepkg + '.sig'):
376            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
377            return False
378        signer = get_signer(d, 'local')
379        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
380            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
381            return False
382
383    # Empty the sstateinst directory to ensure it's clean
384    if os.path.exists(sstateinst):
385        oe.path.remove(sstateinst)
386    bb.utils.mkdirhier(sstateinst)
387
388    sstateinst = d.getVar("SSTATE_INSTDIR")
389    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
390
391    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
392        # All hooks should run in the SSTATE_INSTDIR
393        bb.build.exec_func(f, d, (sstateinst,))
394
395    return sstate_installpkgdir(ss, d)
396
397def sstate_installpkgdir(ss, d):
398    import oe.path
399    import subprocess
400
401    sstateinst = d.getVar("SSTATE_INSTDIR")
402    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
403
404    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
405        # All hooks should run in the SSTATE_INSTDIR
406        bb.build.exec_func(f, d, (sstateinst,))
407
408    def prepdir(dir):
409        # remove dir if it exists, ensure any parent directories do exist
410        if os.path.exists(dir):
411            oe.path.remove(dir)
412        bb.utils.mkdirhier(dir)
413        oe.path.remove(dir)
414
415    for state in ss['dirs']:
416        prepdir(state[1])
417        bb.utils.rename(sstateinst + state[0], state[1])
418    sstate_install(ss, d)
419
420    for plain in ss['plaindirs']:
421        workdir = d.getVar('WORKDIR')
422        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
423        src = sstateinst + "/" + plain.replace(workdir, '')
424        if sharedworkdir in plain:
425            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
426        dest = plain
427        bb.utils.mkdirhier(src)
428        prepdir(dest)
429        bb.utils.rename(src, dest)
430
431    return True
432
433python sstate_hardcode_path_unpack () {
434    # Fixup hardcoded paths
435    #
436    # Note: The logic below must match the reverse logic in
437    # sstate_hardcode_path(d)
438    import subprocess
439
440    sstateinst = d.getVar('SSTATE_INSTDIR')
441    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
442    fixmefn = sstateinst + "fixmepath"
443    if os.path.isfile(fixmefn):
444        staging_target = d.getVar('RECIPE_SYSROOT')
445        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
446
447        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
448            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
449        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
450            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
451        else:
452            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
453
454        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
455        for fixmevar in extra_staging_fixmes.split():
456            fixme_path = d.getVar(fixmevar)
457            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
458
459        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
460        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
461
462        # Defer do_populate_sysroot relocation command
463        if sstatefixmedir:
464            bb.utils.mkdirhier(sstatefixmedir)
465            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
466                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
467                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
468                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
469                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
470                f.write(sstate_hardcode_cmd)
471            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
472            return
473
474        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
475        subprocess.check_call(sstate_hardcode_cmd, shell=True)
476
477        # Need to remove this or we'd copy it into the target directory and may
478        # conflict with another writer
479        os.remove(fixmefn)
480}
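
# Pure-Python sketch (hypothetical helper, not used here) of what the sed/xargs
# pipeline above does to a single extracted file: rewrite the FIXME markers that
# sstate_hardcode_path() inserted at packaging time back to this build's real
# sysroot locations.
def sstate_example_fix_one_file(path, staging_target, staging_host):
    with open(path) as f:
        data = f.read()
    data = data.replace("FIXMESTAGINGDIRTARGET", staging_target)
    data = data.replace("FIXMESTAGINGDIRHOST", staging_host)
    with open(path, "w") as f:
        f.write(data)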
481
482def sstate_clean_cachefile(ss, d):
483    import oe.path
484
485    if d.getVarFlag('do_%s' % ss['task'], 'task'):
486        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
487        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
488        bb.note("Removing %s" % sstatepkgfile)
489        oe.path.remove(sstatepkgfile)
490
491def sstate_clean_cachefiles(d):
492    for task in (d.getVar('SSTATETASKS') or "").split():
493        ld = d.createCopy()
494        ss = sstate_state_fromvars(ld, task)
495        sstate_clean_cachefile(ss, ld)
496
497def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
498    import oe.path
499
500    mfile = open(manifest)
501    entries = mfile.readlines()
502    mfile.close()
503
504    for entry in entries:
505        entry = entry.strip()
506        if prefix and not entry.startswith("/"):
507            entry = prefix + "/" + entry
508        bb.debug(2, "Removing manifest: %s" % entry)
509        # We can race against another package populating directories as we're removing them
510        # so we ignore errors here.
511        try:
512            if entry.endswith("/"):
513                if os.path.islink(entry[:-1]):
514                    os.remove(entry[:-1])
515                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
516                    # Removing directories whilst builds are in progress exposes a race. Only
517                    # do it in contexts where it is safe to do so.
518                    os.rmdir(entry[:-1])
519            else:
520                os.remove(entry)
521        except OSError:
522            pass
523
524    postrm = manifest + ".postrm"
525    if os.path.exists(manifest + ".postrm"):
526        import subprocess
527        os.chmod(postrm, 0o755)
528        subprocess.check_call(postrm, shell=True)
529        oe.path.remove(postrm)
530
531    oe.path.remove(manifest)
532
533def sstate_clean(ss, d):
534    import oe.path
535    import glob
536
537    d2 = d.createCopy()
538    stamp_clean = d.getVar("STAMPCLEAN")
539    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
540    if extrainf:
541        d2.setVar("SSTATE_MANMACH", extrainf)
542        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
543    else:
544        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
545
546    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
547
548    if os.path.exists(manifest):
549        locks = []
550        for lock in ss['lockfiles-shared']:
551            locks.append(bb.utils.lockfile(lock))
552        for lock in ss['lockfiles']:
553            locks.append(bb.utils.lockfile(lock))
554
555        sstate_clean_manifest(manifest, d, canrace=True)
556
557        for lock in locks:
558            bb.utils.unlockfile(lock)
559
560    # Remove the current and previous stamps, but keep the sigdata.
561    #
562    # The glob() matches do_task* which may match multiple tasks, for
563    # example: do_package and do_package_write_ipk, so we need to
564    # exactly match *.do_task.* and *.do_task_setscene.*
565    rm_stamp = '.do_%s.' % ss['task']
566    rm_setscene = '.do_%s_setscene.' % ss['task']
567    # For BB_SIGNATURE_HANDLER = "noop"
568    rm_nohash = ".do_%s" % ss['task']
569    for stfile in glob.glob(wildcard_stfile):
570        # Keep the sigdata
571        if ".sigdata." in stfile or ".sigbasedata." in stfile:
572            continue
573        # Preserve taint files in the stamps directory
574        if stfile.endswith('.taint'):
575            continue
576        if rm_stamp in stfile or rm_setscene in stfile or \
577                stfile.endswith(rm_nohash):
578            oe.path.remove(stfile)
579
580sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
581
582CLEANFUNCS += "sstate_cleanall"
583
584python sstate_cleanall() {
585    bb.note("Removing shared state for package %s" % d.getVar('PN'))
586
587    manifest_dir = d.getVar('SSTATE_MANIFESTS')
588    if not os.path.exists(manifest_dir):
589        return
590
591    tasks = d.getVar('SSTATETASKS').split()
592    for name in tasks:
593        ld = d.createCopy()
594        shared_state = sstate_state_fromvars(ld, name)
595        sstate_clean(shared_state, ld)
596}
597
598python sstate_hardcode_path () {
599    import subprocess, platform
600
601    # Need to remove hardcoded paths and fix these when we install the
602    # staging packages.
603    #
604    # Note: the logic in this function needs to match the reverse logic
605    # in sstate_installpkg(ss, d)
606
607    staging_target = d.getVar('RECIPE_SYSROOT')
608    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
609    sstate_builddir = d.getVar('SSTATE_BUILDDIR')
610
611    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
612    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
613        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
614    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
615        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
616        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
617    else:
618        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
619        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
620
621    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
622    for fixmevar in extra_staging_fixmes.split():
623        fixme_path = d.getVar(fixmevar)
624        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
625        sstate_grep_cmd += " -e '%s'" % (fixme_path)
626
627    fixmefn =  sstate_builddir + "fixmepath"
628
629    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
630    sstate_filelist_cmd = "tee %s" % (fixmefn)
631
632    # fixmepath file needs relative paths, drop sstate_builddir prefix
633    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
634
635    xargs_no_empty_run_cmd = '--no-run-if-empty'
636    if platform.system() == 'Darwin':
637        xargs_no_empty_run_cmd = ''
638
639    # Limit the fixpaths and sed operations based on the initial grep search
640    # This has the side effect of making sure the vfs cache is hot
641    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
642
643    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
644    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
645
646    # If the fixmefn is empty, remove it.
647    if os.stat(fixmefn).st_size == 0:
648        os.remove(fixmefn)
649    else:
650        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
651        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
652}
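
# With hypothetical paths substituted, the pipeline assembled above has the shape:
#   find <sstate-builddir> \( -name "*.la" ... \) -type f \
#     | xargs grep -l -e '<recipe-sysroot>' -e '<recipe-sysroot-native>' \
#     | tee fixmepath \
#     | xargs --no-run-if-empty sed -i -e 's:<recipe-sysroot-native>:FIXMESTAGINGDIRHOST:g' ...
# so only files that actually contain a staging path are rewritten, and their names
# are recorded in fixmepath for the reverse substitution at install time.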
653
654def sstate_package(ss, d):
655    import oe.path
656    import time
657
658    tmpdir = d.getVar('TMPDIR')
659
660    fixtime = False
661    if ss['task'] == "package":
662        fixtime = True
663
664    def fixtimestamp(root, path):
665        f = os.path.join(root, path)
666        if os.lstat(f).st_mtime > sde:
667            os.utime(f, (sde, sde), follow_symlinks=False)
668
669    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
670    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
671    d.setVar("SSTATE_CURRTASK", ss['task'])
672    bb.utils.remove(sstatebuild, recurse=True)
673    bb.utils.mkdirhier(sstatebuild)
674    for state in ss['dirs']:
675        if not os.path.exists(state[1]):
676            continue
677        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
678        # Find and error on absolute symlinks. We could attempt to relocate them, but it's not
679        # clear where the symlink is relative to in this context. We could add that markup
680        # to sstate tasks but there aren't many of these so better just avoid them entirely.
681        for walkroot, dirs, files in os.walk(state[1]):
682            for file in files + dirs:
683                if fixtime:
684                    fixtimestamp(walkroot, file)
685                srcpath = os.path.join(walkroot, file)
686                if not os.path.islink(srcpath):
687                    continue
688                link = os.readlink(srcpath)
689                if not os.path.isabs(link):
690                    continue
691                if not link.startswith(tmpdir):
692                    continue
693                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
694        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
695        bb.utils.rename(state[1], sstatebuild + state[0])
696
697    workdir = d.getVar('WORKDIR')
698    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
699    for plain in ss['plaindirs']:
700        pdir = plain.replace(workdir, sstatebuild)
701        if sharedworkdir in plain:
702            pdir = plain.replace(sharedworkdir, sstatebuild)
703        bb.utils.mkdirhier(plain)
704        bb.utils.mkdirhier(pdir)
705        bb.utils.rename(plain, pdir)
706        if fixtime:
707            fixtimestamp(pdir, "")
708            for walkroot, dirs, files in os.walk(pdir):
709                for file in files + dirs:
710                    fixtimestamp(walkroot, file)
711
712    d.setVar('SSTATE_BUILDDIR', sstatebuild)
713    d.setVar('SSTATE_INSTDIR', sstatebuild)
714
715    if d.getVar('SSTATE_SKIP_CREATION') == '1':
716        return
717
718    sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
719    if d.getVar('SSTATE_SIG_KEY'):
720        sstate_create_package.append('sstate_sign_package')
721
722    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
723             sstate_create_package + \
724             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
725        # All hooks should run in SSTATE_BUILDDIR.
726        bb.build.exec_func(f, d, (sstatebuild,))
727
728    # SSTATE_PKG may have been changed by sstate_report_unihash
729    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
730    if not os.path.exists(siginfo):
731        bb.siggen.dump_this_task(siginfo, d)
732    else:
733        try:
734            os.utime(siginfo, None)
735        except PermissionError:
736            pass
737        except OSError as e:
738            # Handle read-only file systems gracefully
739            import errno
740            if e.errno != errno.EROFS:
741                raise e
742
743    return
744
745sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
746
747def pstaging_fetch(sstatefetch, d):
748    import bb.fetch2
749
750    # Only try to fetch if the user has configured a mirror
751    mirrors = d.getVar('SSTATE_MIRRORS')
752    if not mirrors:
753        return
754
755    # Copy the data object and override DL_DIR and SRC_URI
756    localdata = bb.data.createCopy(d)
757
758    dldir = localdata.expand("${SSTATE_DIR}")
759    bb.utils.mkdirhier(dldir)
760
761    localdata.delVar('MIRRORS')
762    localdata.setVar('FILESPATH', dldir)
763    localdata.setVar('DL_DIR', dldir)
764    localdata.setVar('PREMIRRORS', mirrors)
765    localdata.setVar('SRCPV', d.getVar('SRCPV'))
766
767    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
768    # we'll want to allow network access for the current set of fetches.
769    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
770            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
771        localdata.delVar('BB_NO_NETWORK')
772
773    # Try a fetch from the sstate mirror; if it fails, just return and
774    # we will build the package
775    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
776            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
777    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
778        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
779
780    for srcuri in uris:
781        localdata.setVar('SRC_URI', srcuri)
782        try:
783            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
784            fetcher.checkstatus()
785            fetcher.download()
786
787        except bb.fetch2.BBFetchException:
788            pass
789
790pstaging_fetch[vardepsexclude] += "SRCPV"
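
# For reference, a typical (hypothetical) mirror configuration consumed by the fetch
# above, following the documented SSTATE_MIRRORS form where PATH is mapped to the
# object's relative path by the fetcher:
#   SSTATE_MIRRORS ?= "file://.* https://sstate.example.com/PATH;downloadfilename=PATH"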
791
792
793def sstate_setscene(d):
794    shared_state = sstate_state_fromvars(d)
795    accelerate = sstate_installpkg(shared_state, d)
796    if not accelerate:
797        msg = "No sstate archive obtainable, will run full task instead."
798        bb.warn(msg)
799        raise bb.BBHandledException(msg)
800
801python sstate_task_prefunc () {
802    shared_state = sstate_state_fromvars(d)
803    sstate_clean(shared_state, d)
804}
805sstate_task_prefunc[dirs] = "${WORKDIR}"
806
807python sstate_task_postfunc () {
808    shared_state = sstate_state_fromvars(d)
809
810    for intercept in shared_state['interceptfuncs']:
811        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
812
813    omask = os.umask(0o002)
814    if omask != 0o002:
815       bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
816    sstate_package(shared_state, d)
817    os.umask(omask)
818
819    sstateinst = d.getVar("SSTATE_INSTDIR")
820    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
821
822    sstate_installpkgdir(shared_state, d)
823
824    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
825}
826sstate_task_postfunc[dirs] = "${WORKDIR}"
827
828
829#
830# Shell function to generate an sstate package from a directory
831# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
832#
833sstate_create_package () {
834	# Exit early if it already exists
835	if [ -e ${SSTATE_PKG} ]; then
836		touch ${SSTATE_PKG} 2>/dev/null || true
837		return
838	fi
839
840	mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
841	TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
842
843	OPT="-cS"
844	ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
845	# Use pzstd if available
846	if [ -x "$(command -v pzstd)" ]; then
847		ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
848	fi
849
850	# Need to handle empty directories
851	if [ "$(ls -A)" ]; then
852		set +e
853		tar -I "$ZSTD" $OPT -f $TFILE *
854		ret=$?
855		if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
856			exit 1
857		fi
858		set -e
859	else
860		tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
861	fi
862	chmod 0664 $TFILE
863	# Skip if it was already created by some other process
864	if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
865		# There is a symbolic link, but it links to nothing.
866		# Forcefully replace it with the new file.
867		ln -f $TFILE ${SSTATE_PKG} || true
868	elif [ ! -e ${SSTATE_PKG} ]; then
869		# Move into place using ln to attempt an atomic op.
870		# Abort if it already exists
871		ln $TFILE ${SSTATE_PKG} || true
872	else
873		touch ${SSTATE_PKG} 2>/dev/null || true
874	fi
875	rm $TFILE
876}
877
878python sstate_sign_package () {
879    from oe.gpg_sign import get_signer
880
881
882    signer = get_signer(d, 'local')
883    sstate_pkg = d.getVar('SSTATE_PKG')
884    if os.path.exists(sstate_pkg + '.sig'):
885        os.unlink(sstate_pkg + '.sig')
886    signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
887                       d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
888}
889
890python sstate_report_unihash() {
891    report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
892
893    if report_unihash:
894        ss = sstate_state_fromvars(d)
895        report_unihash(os.getcwd(), ss['task'], d)
896}
897
898#
899# Shell function to decompress and prepare a package for installation
900# Will be run from within SSTATE_INSTDIR.
901#
902sstate_unpack_package () {
903	ZSTD="zstd -T${ZSTD_THREADS}"
904	# Use pzstd if available
905	if [ -x "$(command -v pzstd)" ]; then
906		ZSTD="pzstd -p ${ZSTD_THREADS}"
907	fi
908
909	tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
910	# update .siginfo atime on local/NFS mirror if it is a symbolic link
911	[ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
912	# update each symbolic link instead of any referenced file
913	touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
914	[ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
915	[ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
916}
917
918BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
919
920def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
921    found = set()
922    missed = set()
923
924    def gethash(task):
925        return sq_data['unihash'][task]
926
927    def getpathcomponents(task, d):
928        # Magic data from BB_HASHFILENAME
929        splithashfn = sq_data['hashfn'][task].split(" ")
930        spec = splithashfn[1]
931        if splithashfn[0] == "True":
932            extrapath = d.getVar("NATIVELSBSTRING") + "/"
933        else:
934            extrapath = ""
935
936        tname = bb.runqueue.taskname_from_tid(task)[3:]
937
938        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
939            spec = splithashfn[2]
940            extrapath = ""
941
942        return spec, extrapath, tname
943
944    def getsstatefile(tid, siginfo, d):
945        spec, extrapath, tname = getpathcomponents(tid, d)
946        return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
947
948    for tid in sq_data['hash']:
949
950        sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
951
952        if os.path.exists(sstatefile):
953            found.add(tid)
954            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
955        else:
956            missed.add(tid)
957            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
958
959    foundLocal = len(found)
960    mirrors = d.getVar("SSTATE_MIRRORS")
961    if mirrors:
962        # Copy the data object and override DL_DIR and SRC_URI
963        localdata = bb.data.createCopy(d)
964
965        dldir = localdata.expand("${SSTATE_DIR}")
966        localdata.delVar('MIRRORS')
967        localdata.setVar('FILESPATH', dldir)
968        localdata.setVar('DL_DIR', dldir)
969        localdata.setVar('PREMIRRORS', mirrors)
970
971        bb.debug(2, "SState using premirror of: %s" % mirrors)
972
973        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
974        # we'll want to allow network access for the current set of fetches.
975        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
976                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
977            localdata.delVar('BB_NO_NETWORK')
978
979        from bb.fetch2 import FetchConnectionCache
980        def checkstatus_init(thread_worker):
981            thread_worker.connection_cache = FetchConnectionCache()
982
983        def checkstatus_end(thread_worker):
984            thread_worker.connection_cache.close_connections()
985
986        def checkstatus(thread_worker, arg):
987            (tid, sstatefile) = arg
988
989            localdata2 = bb.data.createCopy(localdata)
990            srcuri = "file://" + sstatefile
991            localdata2.setVar('SRC_URI', srcuri)
992            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
993
994            import traceback
995
996            try:
997                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
998                            connection_cache=thread_worker.connection_cache)
999                fetcher.checkstatus()
1000                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
1001                found.add(tid)
1002                missed.remove(tid)
1003            except bb.fetch2.FetchError as e:
1004                bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
1005            except Exception as e:
1006                bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
1007
1008            if progress:
1009                bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
1010
1011        tasklist = []
1012        for tid in missed:
1013            sstatefile = d.expand(getsstatefile(tid, siginfo, d))
1014            tasklist.append((tid, sstatefile))
1015
1016        if tasklist:
1017            nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
1018
1019            progress = len(tasklist) >= 100
1020            if progress:
1021                msg = "Checking sstate mirror object availability"
1022                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
1023
1024            # Have to setup the fetcher environment here rather than in each thread as it would race
1025            fetcherenv = bb.fetch2.get_fetcher_environment(d)
1026            with bb.utils.environment(**fetcherenv):
1027                bb.event.enable_threadlock()
1028                pool = oe.utils.ThreadedPool(nproc, len(tasklist),
1029                        worker_init=checkstatus_init, worker_end=checkstatus_end,
1030                        name="sstate_checkhashes-")
1031                for t in tasklist:
1032                    pool.add_task(checkstatus, t)
1033                pool.start()
1034                pool.wait_completion()
1035                bb.event.disable_threadlock()
1036
1037            if progress:
1038                bb.event.fire(bb.event.ProcessFinished(msg), d)
1039
1040    inheritlist = d.getVar("INHERIT")
1041    if "toaster" in inheritlist:
1042        evdata = {'missed': [], 'found': []};
1043        for tid in missed:
1044            sstatefile = d.expand(getsstatefile(tid, False, d))
1045            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1046        for tid in found:
1047            sstatefile = d.expand(getsstatefile(tid, False, d))
1048            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1049        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
1050
1051    if summary:
1052        # Print some summary statistics about the current task completion and how much sstate
1053        # reuse there was. Avoid divide by zero errors.
1054        total = len(sq_data['hash'])
1055        complete = 0
1056        if currentcount:
1057            complete = (len(found) + currentcount) / (total + currentcount) * 100
1058        match = 0
1059        if total:
1060            match = len(found) / total * 100
1061        bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
1062            (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
1063
1064    if hasattr(bb.parse.siggen, "checkhashes"):
1065        bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1066
1067    return found
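
# BB_HASHFILENAME (set near the top of this class and adjusted in the anonymous
# python function) packs three space-separated fields: whether the NATIVELSBSTRING
# extra path applies, the normal package spec and the "software" spec used for
# fetch/unpack/patch/populate_lic/preconfigure tasks. A minimal decoding sketch,
# mirroring getpathcomponents() above, with a hypothetical value:
def sstate_example_split_hashfn(hashfn):
    # e.g. "True <SSTATE_PKGSPEC> <SSTATE_SWSPEC>"
    splithashfn = hashfn.split(" ")
    return splithashfn[0] == "True", splithashfn[1], splithashfn[2]
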
1068setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
1069
1070BB_SETSCENE_DEPVALID = "setscene_depvalid"
1071
1072def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1073    # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
1074    # task is included in taskdependees too
1075    # Return - False - We need this dependency
1076    #        - True - We can skip this dependency
1077    import re
1078
1079    def logit(msg, log):
1080        if log is not None:
1081            log.append(msg)
1082        else:
1083            bb.debug(2, msg)
1084
1085    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1086
1087    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]
1088
1089    def isNativeCross(x):
1090        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1091
1092    # We only need to trigger deploy_source_date_epoch through direct dependencies
1093    if taskdependees[task][1] in directtasks:
1094        return True
1095
1096    # We only need to trigger packagedata through direct dependencies
1097    # but need to preserve packagedata on packagedata links
1098    if taskdependees[task][1] == "do_packagedata":
1099        for dep in taskdependees:
1100            if taskdependees[dep][1] == "do_packagedata":
1101                return False
1102        return True
1103
1104    for dep in taskdependees:
1105        logit("  considering dependency: %s" % (str(taskdependees[dep])), log)
1106        if task == dep:
1107            continue
1108        if dep in notneeded:
1109            continue
1110        # do_package_write_* and do_package don't need do_package
1111        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1112            continue
1113        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1114        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1115            return False
1116        # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
1117        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
1118            continue
1119        # Native/Cross packages don't exist and are noexec anyway
1120        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1121            continue
1122
1123        # This is due to the [depends] in useradd.bbclass complicating matters
1124        # The logic *is* reversed here due to the way hard setscene dependencies are injected
1125        if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
1126            continue
1127
1128        # Consider sysroot depending on sysroot tasks
1129        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
1130            # Allow excluding certain recursive dependencies. If a recipe needs it should add a
1131            # specific dependency itself, rather than relying on one of its dependees to pull
1132            # them in.
1133            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
1134            not_needed = False
1135            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1136            if excludedeps is None:
1137                # Cache the regular expressions for speed
1138                excludedeps = []
1139                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1140                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1141                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1142            for excl in excludedeps:
1143                if excl[0].match(taskdependees[dep][0]):
1144                    if excl[1].match(taskdependees[task][0]):
1145                        not_needed = True
1146                        break
1147            if not_needed:
1148                continue
1149            # For meta-extsdk-toolchain we want all sysroot dependencies
1150            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1151                return False
1152            # Native/Cross populate_sysroot need their dependencies
1153            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1154                return False
1155            # Target populate_sysroot depended on by cross tools need to be installed
1156            if isNativeCross(taskdependees[dep][0]):
1157                return False
1158            # Native/cross tools depended upon by target sysroot are not needed
1159            # Add an exception for shadow-native as required by useradd.bbclass
1160            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1161                continue
1162            # Target populate_sysroot need their dependencies
1163            return False
1164
1165        if taskdependees[dep][1] in directtasks:
1166            continue
1167
1168        # Safe fallthrough default
1169        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1170        return False
1171    return True
1172
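# Illustrative sketch (hypothetical task ids and filenames) of the data shape
# setscene_depvalid() receives: each entry maps a task to [PN, TASKNAME, FILENAME].
def sstate_example_depvalid_call(d):
    taskdependees = {
        "tid1": ["zlib", "do_populate_sysroot", "/path/to/zlib_1.3.bb"],
        "tid2": ["zlib", "do_package_qa", "/path/to/zlib_1.3.bb"],
    }
    # do_package_qa does not need zlib's staged sysroot, so the dependency can be
    # skipped and this returns True
    return setscene_depvalid("tid1", taskdependees, set(), d)
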
1173addhandler sstate_eventhandler
1174sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1175python sstate_eventhandler() {
1176    d = e.data
1177    writtensstate = d.getVar('SSTATE_CURRTASK')
1178    if not writtensstate:
1179        taskname = d.getVar("BB_RUNTASK")[3:]
1180        spec = d.getVar('SSTATE_PKGSPEC')
1181        swspec = d.getVar('SSTATE_SWSPEC')
1182        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1183            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1184            d.setVar("SSTATE_EXTRAPATH", "")
1185        d.setVar("SSTATE_CURRTASK", taskname)
1186        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1187        if not os.path.exists(siginfo):
1188            bb.siggen.dump_this_task(siginfo, d)
1189        else:
1190            try:
1191                os.utime(siginfo, None)
1192            except PermissionError:
1193                pass
1194            except OSError as e:
1195                # Handle read-only file systems gracefully
1196                import errno
1197                if e.errno != errno.EROFS:
1198                    raise e
1199
1200}
1201
1202SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1203
1204#
1205# Event handler which removes manifests and stamp files for recipes which are no
1206# longer 'reachable' in a build where they once were. 'Reachable' refers to
1207# whether a recipe is parsed so recipes in a layer which was removed would no
1208# longer be reachable. Switching between systemd and sysvinit where recipes
1209# became skipped would be another example.
1210#
1211# Also optionally removes the workdir of those tasks/recipes
1212#
1213addhandler sstate_eventhandler_reachablestamps
1214sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
1215python sstate_eventhandler_reachablestamps() {
1216    import glob
1217    d = e.data
1218    stamps = e.stamps.values()
1219    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1220    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1221    preservestamps = []
1222    if os.path.exists(preservestampfile):
1223        with open(preservestampfile, 'r') as f:
1224            preservestamps = f.readlines()
1225    seen = []
1226
1227    # The machine index contains all the stamps this machine has ever seen in this build directory.
1228    # We should only remove things which this machine once accessed but no longer does.
1229    machineindex = set()
1230    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1231    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1232    if os.path.exists(mi):
1233        with open(mi, "r") as f:
1234            machineindex = set(line.strip() for line in f.readlines())
1235
1236    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1237        toremove = []
1238        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1239        if not os.path.exists(i):
1240            continue
1241        manseen = set()
1242        ignore = []
1243        with open(i, "r") as f:
1244            lines = f.readlines()
1245            for l in reversed(lines):
1246                try:
1247                    (stamp, manifest, workdir) = l.split()
1248                    # The index may have multiple entries for the same manifest as the code above only appends
1249                    # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
1250                    # The last entry in the list is the valid one, any earlier entries with matching manifests
1251                    # should be ignored.
1252                    if manifest in manseen:
1253                        ignore.append(l)
1254                        continue
1255                    manseen.add(manifest)
1256                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1257                        toremove.append(l)
1258                        if stamp not in seen:
1259                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1260                            seen.append(stamp)
1261                except ValueError:
1262                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1263
1264        if toremove:
1265            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1266            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1267
1268            removed = 0
1269            for r in toremove:
1270                (stamp, manifest, workdir) = r.split()
1271                for m in glob.glob(manifest + ".*"):
1272                    if m.endswith(".postrm"):
1273                        continue
1274                    sstate_clean_manifest(m, d)
1275                bb.utils.remove(stamp + "*")
1276                if removeworkdir:
1277                    bb.utils.remove(workdir, recurse = True)
1278                lines.remove(r)
1279                removed = removed + 1
1280                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1281
1282            bb.event.fire(bb.event.ProcessFinished(msg), d)
1283
1284        with open(i, "w") as f:
1285            for l in lines:
1286                if l in ignore:
1287                    continue
1288                f.write(l)
1289    machineindex |= set(stamps)
1290    with open(mi, "w") as f:
1291        for l in machineindex:
1292            f.write(l + "\n")
1293
1294    if preservestamps:
1295        os.remove(preservestampfile)
1296}
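
# Each line of the per-arch index files walked above has the form
# "<stamp-base> <manifest-prefix> <workdir>", as written by sstate_install().
# A minimal parsing sketch with a hypothetical line, for illustration:
def sstate_example_parse_index_line(line):
    # e.g. "TMPDIR/stamps/core2-64/zlib/1.3-r0 TMPDIR/sstate-control/manifest-core2-64-zlib TMPDIR/work/core2-64/zlib/1.3-r0"
    stamp, manifest, workdir = line.split()
    return stamp, manifest, workdir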
1297
1298
1299#
1300# Bitbake can generate an event showing which setscene tasks are 'stale',
1301# i.e. which ones will be rerun. These are ones where a stamp file is present but
1302# it is stale (e.g. taskhash doesn't match). With that list we can go through
1303# the manifests for matching tasks and "uninstall" those manifests now. We do
1304# this now rather than mid build since the distribution of files between sstate
1305# objects may have changed, new tasks may run first and if those new tasks overlap
1306# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1307# removing these files is fast.
1308#
1309addhandler sstate_eventhandler_stalesstate
1310sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1311python sstate_eventhandler_stalesstate() {
1312    d = e.data
1313    tasks = e.tasks
1314
1315    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1316
1317    for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1318        toremove = []
1319        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1320        if not os.path.exists(i):
1321            continue
1322        with open(i, "r") as f:
1323            lines = f.readlines()
1324            for l in lines:
1325                try:
1326                    (stamp, manifest, workdir) = l.split()
1327                    for tid in tasks:
1328                        for s in tasks[tid]:
1329                            if s.startswith(stamp):
1330                                taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1331                                manname = manifest + "." + taskname
1332                                if os.path.exists(manname):
1333                                    bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1334                                    toremove.append((manname, tid, tasks[tid]))
1335                                    break
1336                except ValueError:
1337                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1338
1339        if toremove:
1340            msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1341            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1342
1343            removed = 0
1344            for (manname, tid, stamps) in toremove:
1345                sstate_clean_manifest(manname, d)
1346                for stamp in stamps:
1347                    bb.utils.remove(stamp)
1348                removed = removed + 1
1349                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1350
1351            bb.event.fire(bb.event.ProcessFinished(msg), d)
1352}
1353