utils.py

#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

import os
import subprocess
import multiprocessing
import traceback
import errno

import bb.parse

def read_file(filename):
    try:
        f = open( filename, "r" )
    except IOError as reason:
        return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
    else:
        data = f.read().strip()
        f.close()
        return data
    return None

def ifelse(condition, iftrue = True, iffalse = False):
    if condition:
        return iftrue
    else:
        return iffalse

def conditional(variable, checkvalue, truevalue, falsevalue, d):
    if d.getVar(variable) == checkvalue:
        return truevalue
    else:
        return falsevalue

def vartrue(var, iftrue, iffalse, d):
    import oe.types
    if oe.types.boolean(d.getVar(var)):
        return iftrue
    else:
        return iffalse

def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    if float(d.getVar(variable)) <= float(checkvalue):
        return truevalue
    else:
        return falsevalue

def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
    if result <= 0:
        return truevalue
    else:
        return falsevalue

def both_contain(variable1, variable2, checkvalue, d):
    val1 = d.getVar(variable1)
    val2 = d.getVar(variable2)
    val1 = set(val1.split())
    val2 = set(val2.split())
    if isinstance(checkvalue, str):
        checkvalue = set(checkvalue.split())
    else:
        checkvalue = set(checkvalue)
    if checkvalue.issubset(val1) and checkvalue.issubset(val2):
        return " ".join(checkvalue)
    else:
        return ""

def set_intersect(variable1, variable2, d):
    """
    Expand both variables, interpret them as lists of strings, and return the
    intersection as a flattened string.

    For example:
    s1 = "a b c"
    s2 = "b c d"
    s3 = set_intersect(s1, s2)
    => s3 = "b c"
    """
    val1 = set(d.getVar(variable1).split())
    val2 = set(d.getVar(variable2).split())
    return " ".join(val1 & val2)

def prune_suffix(var, suffixes, d):
    # See if var ends with any of the suffixes listed and
    # remove it if found
    for suffix in suffixes:
        if suffix and var.endswith(suffix):
            var = var[:-len(suffix)]

    prefix = d.getVar("MLPREFIX")
    if prefix and var.startswith(prefix):
        var = var[len(prefix):]

    return var

def str_filter(f, str, d):
    from re import match
    return " ".join([x for x in str.split() if match(f, x, 0)])

def str_filter_out(f, str, d):
    from re import match
    return " ".join([x for x in str.split() if not match(f, x, 0)])

def build_depends_string(depends, task):
    """Append a taskname to a string of dependencies as used by the [depends] flag"""
    return " ".join(dep + ":" + task for dep in depends.split())

def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    return any(bb.data.inherits_class(cls, d) for cls in classes)

def features_backfill(var, d):
    # This construct allows the addition of new features to variable specified
    # as var
    # Example for var = "DISTRO_FEATURES"
    # This construct allows the addition of new features to DISTRO_FEATURES
    # that if not present would disable existing functionality, without
    # disturbing distributions that have already set DISTRO_FEATURES.
    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
    # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
    features = (d.getVar(var) or "").split()
    backfill = (d.getVar(var + "_BACKFILL") or "").split()
    considered = (d.getVar(var + "_BACKFILL_CONSIDERED") or "").split()

    addfeatures = []
    for feature in backfill:
        if feature not in features and feature not in considered:
            addfeatures.append(feature)

    if addfeatures:
        d.appendVar(var, " " + " ".join(addfeatures))
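
# Illustrative usage sketch (not part of the original module): configuration
# metadata would typically call features_backfill() from an anonymous python
# function so backfilled features are added before recipes are parsed, along
# the lines of what OE-core's bitbake.conf does. The variable names below are
# examples only.
#
#   python () {
#       oe.utils.features_backfill("DISTRO_FEATURES", d)
#       oe.utils.features_backfill("MACHINE_FEATURES", d)
#   }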

def all_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if *all* given features are set in DISTRO_FEATURES,
    else falsevalue. The features can be given as single string or anything
    that can be turned into a set.

    This is a shorter, more flexible version of
    bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    Without explicit true/false values it can be used directly where
    Python expects a boolean:
       if oe.utils.all_distro_features(d, "foo bar"):
           bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")

    With just a truevalue, it can be used to include files that are meant to be
    used only when requested via DISTRO_FEATURES:
       require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")}
    """
    return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)

def any_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
    else falsevalue. The features can be given as single string or anything
    that can be turned into a set.

    This is a shorter, more flexible version of
    bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    Without explicit true/false values it can be used directly where
    Python expects a boolean:
       if not oe.utils.any_distro_features(d, "foo bar"):
           bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")

    With just a truevalue, it can be used to include files that are meant to be
    used only when requested via DISTRO_FEATURES:
       require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")}
    """
    return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)

def parallel_make(d, makeinst=False):
    """
    Return the integer value for the number of parallel threads to use when
    building, scraped out of PARALLEL_MAKE. If no parallelization option is
    found, returns None.

    e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
    """
    if makeinst:
        pm = (d.getVar('PARALLEL_MAKEINST') or '').split()
    else:
        pm = (d.getVar('PARALLEL_MAKE') or '').split()
    # look for '-j' and throw other options (e.g. '-l') away
    while pm:
        opt = pm.pop(0)

        if opt == '-j':
            v = pm.pop(0)
        elif opt.startswith('-j'):
            v = opt[2:].strip()
        else:
            continue

        return int(v)

    return None

def parallel_make_argument(d, fmt, limit=None, makeinst=False):
    """
    Helper utility to construct a parallel make argument from the number of
    parallel threads specified in PARALLEL_MAKE.

    Returns the input format string `fmt` where a single '%d' will be expanded
    with the number of parallel threads to use. If `limit` is specified, the
    number of parallel threads will be no larger than it. If no parallelization
    option is found in PARALLEL_MAKE, returns an empty string.

    e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
    "-n 10"
    """
    v = parallel_make(d, makeinst)
    if v:
        if limit:
            v = min(limit, v)
        return fmt % v
    return ''

def packages_filter_out_system(d):
    """
    Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-en-gb removed.
    """
    pn = d.getVar('PN')
    pkgfilter = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
    localepkg = pn + "-locale-"
    pkgs = []

    for pkg in d.getVar('PACKAGES').split():
        if pkg not in pkgfilter and localepkg not in pkg:
            pkgs.append(pkg)
    return pkgs

def getstatusoutput(cmd):
    return subprocess.getstatusoutput(cmd)

def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods. For
    example, trim_version("1.2.3", 2) will return "1.2".
    """
    if type(version) is not str:
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")

    parts = version.split(".")
    trimmed = ".".join(parts[:num_parts])
    return trimmed

def cpu_count(at_least=1, at_most=64):
    cpus = len(os.sched_getaffinity(0))
    return max(min(cpus, at_most), at_least)

def execute_pre_post_process(d, cmds):
    if cmds is None:
        return

    cmds = cmds.replace(";", " ")
    for cmd in cmds.split():
        bb.note("Executing %s ..." % cmd)
        bb.build.exec_func(cmd, d)
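
# Illustrative usage sketch (not part of the original module): cmds is a
# whitespace- or ';'-separated list of shell/python function names, each of
# which is run via bb.build.exec_func(). The variable name below is only an
# example of how callers typically feed this function.
#
#   cmds = d.getVar("ROOTFS_POSTPROCESS_COMMAND")
#   execute_pre_post_process(d, cmds)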

@bb.parse.vardepsexclude("BB_NUMBER_THREADS")
def get_bb_number_threads(d):
    return int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)

def multiprocess_launch(target, items, d, extraargs=None):
    max_process = get_bb_number_threads(d)
    return multiprocess_launch_mp(target, items, max_process, extraargs)

# For each item in items, call the function 'target' with item as the first
# argument, extraargs as the other arguments and handle any exceptions in the
# parent thread
def multiprocess_launch_mp(target, items, max_process, extraargs=None):

    class ProcessLaunch(multiprocessing.Process):
        def __init__(self, *args, **kwargs):
            multiprocessing.Process.__init__(self, *args, **kwargs)
            self._pconn, self._cconn = multiprocessing.Pipe()
            self._exception = None
            self._result = None

        def run(self):
            try:
                ret = self._target(*self._args, **self._kwargs)
                self._cconn.send((None, ret))
            except Exception as e:
                tb = traceback.format_exc()
                self._cconn.send((e, tb))

        def update(self):
            if self._pconn.poll():
                (e, tb) = self._pconn.recv()
                if e is not None:
                    self._exception = (e, tb)
                else:
                    self._result = tb

        @property
        def exception(self):
            self.update()
            return self._exception

        @property
        def result(self):
            self.update()
            return self._result

    launched = []
    errors = []
    results = []
    items = list(items)
    while (items and not errors) or launched:
        if not errors and items and len(launched) < max_process:
            args = items.pop()
            if not type(args) is tuple:
                args = (args,)
            if extraargs is not None:
                args = args + extraargs
            p = ProcessLaunch(target=target, args=args)
            p.start()
            launched.append(p)
        for q in launched:
            # Have to manually call update() to avoid deadlocks. The pipe can be full and
            # transfer stalled until we try and read the results object but the subprocess won't exit
            # as it still has data to write (https://bugs.python.org/issue8426)
            q.update()
            # The finished processes are joined when calling is_alive()
            if not q.is_alive():
                if q.exception:
                    errors.append(q.exception)
                if q.result:
                    results.append(q.result)
                launched.remove(q)
    # Paranoia doesn't hurt
    for p in launched:
        p.join()
    if errors:
        msg = ""
        for (e, tb) in errors:
            if isinstance(e, subprocess.CalledProcessError) and e.output:
                msg = msg + str(e) + "\n"
                msg = msg + "Subprocess output:"
                msg = msg + e.output.decode("utf-8", errors="ignore")
            else:
                msg = msg + str(e) + ": " + str(tb) + "\n"
        bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
    return results
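
# Illustrative usage sketch (not part of the original module): run a worker
# function over a list of items in parallel, collecting the return values;
# any exception raised in a child is reported in the parent via bb.fatal().
# The worker function and file list below are examples only.
#
#   def checksum_one(fn):
#       return (fn, bb.utils.sha256_file(fn))
#
#   results = oe.utils.multiprocess_launch(checksum_one, files, d)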

def squashspaces(string):
    import re
    return re.sub(r"\s+", " ", string).strip()

def rprovides_map(pkgdata_dir, pkg_dict):
    # Map file -> pkg provider
    rprov_map = {}

    for pkg in pkg_dict:
        path_to_pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
        if not os.path.isfile(path_to_pkgfile):
            continue
        with open(path_to_pkgfile) as f:
            for line in f:
                if line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES'):
                    # List all components provided by pkg.
                    # Exclude version strings, i.e. those starting with (
                    provides = [x for x in line.split()[1:] if not x.startswith('(')]
                    for prov in provides:
                        if prov in rprov_map:
                            rprov_map[prov].append(pkg)
                        else:
                            rprov_map[prov] = [pkg]

    return rprov_map

def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
    output = []

    if ret_format == "arch":
        for pkg in sorted(pkg_dict):
            output.append("%s %s" % (pkg, pkg_dict[pkg]["arch"]))
    elif ret_format == "file":
        for pkg in sorted(pkg_dict):
            output.append("%s %s %s" % (pkg, pkg_dict[pkg]["filename"], pkg_dict[pkg]["arch"]))
    elif ret_format == "ver":
        for pkg in sorted(pkg_dict):
            output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
    elif ret_format == "deps":
        rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
        for pkg in sorted(pkg_dict):
            for dep in pkg_dict[pkg]["deps"]:
                if dep in rprov_map:
                    # There could be multiple providers within the image
                    for pkg_provider in rprov_map[dep]:
                        output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep))
                else:
                    output.append("%s|%s" % (pkg, dep))
    else:
        for pkg in sorted(pkg_dict):
            output.append(pkg)

    output_str = '\n'.join(output)

    if output_str:
        # make sure last line is newline terminated
        output_str += '\n'

    return output_str

# Helper function to get the host compiler version
# Do not assume the compiler is gcc
def get_host_compiler_version(d, taskcontextonly=False):
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    compiler = d.getVar("BUILD_CC")
    # Get rid of ccache since it is not present when parsing.
    if compiler.startswith('ccache '):
        compiler = compiler[7:]
    try:
        env = os.environ.copy()
        # datastore PATH does not contain session PATH as set by environment-setup-...
        # this breaks the install-buildtools use-case
        # env["PATH"] = d.getVar("PATH")
        output = subprocess.check_output("%s --version" % compiler, \
                    shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))

    match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
    if not match:
        bb.fatal("Can't get compiler version from %s --version output" % compiler)

    version = match.group(1)
    return compiler, version
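
# Illustrative usage sketch (not part of the original module): the function
# returns a (compiler, "major.minor") tuple derived from the first line of
# "$BUILD_CC --version". The values shown are examples only.
#
#   compiler, version = oe.utils.get_host_compiler_version(d)
#   bb.note("Host compiler: %s %s" % (compiler, version))   # e.g. "gcc 13.2"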

@bb.parse.vardepsexclude("DEFAULTTUNE_MULTILIB_ORIGINAL", "OVERRIDES")
def get_multilib_datastore(variant, d):
    localdata = bb.data.createCopy(d)
    if variant:
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
        localdata.setVar("OVERRIDES", overrides)
        localdata.setVar("MLPREFIX", variant + "-")
    else:
        origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
        if origdefault:
            localdata.setVar("DEFAULTTUNE", origdefault)
        overrides = localdata.getVar("OVERRIDES", False).split(":")
        overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
        localdata.setVar("OVERRIDES", overrides)
        localdata.setVar("MLPREFIX", "")
    return localdata
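
# Illustrative usage sketch (not part of the original module): obtain a copy
# of the datastore configured for a multilib variant and read variables in
# that context. The variant name "lib32" is an example only; passing an empty
# variant yields a datastore with multilib overrides stripped.
#
#   localdata = oe.utils.get_multilib_datastore("lib32", d)
#   lib32_tune = localdata.getVar("DEFAULTTUNE")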

def sh_quote(string):
    import shlex
    return shlex.quote(string)

def directory_size(root, blocksize=4096):
    """
    Calculate the size of the directory, taking into account hard links,
    rounding up every size to multiples of the blocksize.
    """
    def roundup(size):
        """
        Round the size up to the nearest multiple of the block size.
        """
        import math
        return math.ceil(size / blocksize) * blocksize

    def getsize(filename):
        """
        Get the size of the filename, not following symlinks, taking into
        account hard links.
        """
        stat = os.lstat(filename)
        if stat.st_ino not in inodes:
            inodes.add(stat.st_ino)
            return stat.st_size
        else:
            return 0

    inodes = set()

    total = 0
    for root, dirs, files in os.walk(root):
        total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
        total += roundup(getsize(root))
    return total
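
# Illustrative usage sketch (not part of the original module): estimate the
# on-disk footprint of a rootfs tree, counting each hard-linked inode once and
# rounding every file up to 4k blocks. The variable name is an example only.
#
#   size_bytes = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS"))
#   size_kb = size_bytes // 1024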

# Update the mtime of a file, skip if permission/read-only issues
def touch(filename):
    try:
        os.utime(filename, None)
    except PermissionError:
        pass
    except OSError as e:
        # Handle read-only file systems gracefully
        if e.errno != errno.EROFS:
            raise e