bitbake-worker 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375
#!/usr/bin/env python

import os
import sys
import warnings
# Make the sibling 'lib' directory (next to this script's parent dir)
# importable so the bundled bb modules are found.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
from bb import fetch2
import logging
import bb
import select
import errno
import signal

# Users shouldn't be running this code directly
if len(sys.argv) != 2 or sys.argv[1] != "decafbad":
    print("bitbake-worker is meant for internal execution by bitbake itself, please don't use it standalone.")
    sys.exit(1)

logger = logging.getLogger("BitBake")

# Prefer the C pickle implementation where available (Python 2).
try:
    import cPickle as pickle
except ImportError:
    import pickle
    bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")

# Events/results flow back to the cooker over our stdout; made
# non-blocking so worker_flush() can write opportunistically.
worker_pipe = sys.stdout.fileno()
bb.utils.nonblockingfd(worker_pipe)

handler = bb.event.LogHandler()
logger.addHandler(handler)

if 0:
    # Code to write out a log file of all events passing through the worker
    logfilename = "/tmp/workerlogfile"
    format_str = "%(levelname)s: %(message)s"
    conlogformat = bb.msg.BBLogFormatter(format_str)
    consolelog = logging.FileHandler(logfilename)
    bb.msg.addDefaultlogFilter(consolelog)
    consolelog.setFormatter(conlogformat)
    logger.addHandler(consolelog)

# Buffer of serialized events not yet written to the cooker.
worker_queue = ""
  36. def worker_fire(event, d):
  37. data = "<event>" + pickle.dumps(event) + "</event>"
  38. worker_fire_prepickled(data)
  39. def worker_fire_prepickled(event):
  40. global worker_queue
  41. worker_queue = worker_queue + event
  42. worker_flush()
  43. def worker_flush():
  44. global worker_queue, worker_pipe
  45. if not worker_queue:
  46. return
  47. try:
  48. written = os.write(worker_pipe, worker_queue)
  49. worker_queue = worker_queue[written:]
  50. except (IOError, OSError) as e:
  51. if e.errno != errno.EAGAIN:
  52. raise
  53. def worker_child_fire(event, d):
  54. global worker_pipe
  55. data = "<event>" + pickle.dumps(event) + "</event>"
  56. worker_pipe.write(data)
# Route all events raised in this (parent) process through the queued,
# non-blocking pipe writer above.
bb.event.worker_fire = worker_fire

# Optional debug command log; left disabled by default.
lf = None
#lf = open("/tmp/workercommandlog", "w+")
  60. def workerlog_write(msg):
  61. if lf:
  62. lf.write(msg)
  63. lf.flush()
def fork_off_task(cfg, data, workerdata, fn, task, taskname, appends, taskdepdata, quieterrors=False):
    """Fork a child process to execute one task.

    In the parent, returns (pid, pipein, pipeout) where pipein carries
    pickled <event> chunks back from the child.  The child never
    returns: it runs the task and terminates via os._exit().
    """
    # We need to setup the environment BEFORE the fork, since
    # a fork() or exec*() activates PSEUDO...

    # envbackup remembers the parent's original environment so it can
    # be restored after the fork; fakeenv is re-applied in the child
    # after the environment is emptied.
    envbackup = {}
    fakeenv = {}
    umask = None

    taskdep = workerdata["taskdeps"][fn]
    if 'umask' in taskdep and taskname in taskdep['umask']:
        # umask might come in as a number or text string..
        try:
            umask = int(taskdep['umask'][taskname],8)
        except TypeError:
            umask = taskdep['umask'][taskname]

    # We can't use the fakeroot environment in a dry run as it possibly hasn't been built
    if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not cfg.dry_run:
        # envvars are KEY=VALUE pairs, whitespace separated.
        envvars = (workerdata["fakerootenv"][fn] or "").split()
        for key, value in (var.split('=') for var in envvars):
            envbackup[key] = os.environ.get(key)
            os.environ[key] = value
            fakeenv[key] = value

        fakedirs = (workerdata["fakerootdirs"][fn] or "").split()
        for p in fakedirs:
            bb.utils.mkdirhier(p)
        logger.debug(2, 'Running %s:%s under fakeroot, fakedirs: %s' %
                        (fn, taskname, ', '.join(fakedirs)))
    else:
        envvars = (workerdata["fakerootnoenv"][fn] or "").split()
        for key, value in (var.split('=') for var in envvars):
            envbackup[key] = os.environ.get(key)
            os.environ[key] = value
            fakeenv[key] = value

    # Avoid duplicated buffered output appearing in both processes.
    sys.stdout.flush()
    sys.stderr.flush()

    try:
        pipein, pipeout = os.pipe()
        pipein = os.fdopen(pipein, 'rb', 4096)
        pipeout = os.fdopen(pipeout, 'wb', 0)
        pid = os.fork()
    except OSError as e:
        bb.msg.fatal("RunQueue", "fork failed: %d (%s)" % (e.errno, e.strerror))

    if pid == 0:
        # ---- child process ----
        global worker_pipe
        pipein.close()

        # Restore default SIGTERM behaviour; the parent's handler
        # must not run in the child.
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        # Save out the PID so that the event can include it the
        # events
        bb.event.worker_pid = os.getpid()
        bb.event.worker_fire = worker_child_fire
        worker_pipe = pipeout

        # Make the child the process group leader
        os.setpgid(0, 0)
        # No stdin
        newsi = os.open(os.devnull, os.O_RDWR)
        os.dup2(newsi, sys.stdin.fileno())

        if umask:
            os.umask(umask)

        data.setVar("BB_WORKERCONTEXT", "1")
        data.setVar("BB_TASKDEPDATA", taskdepdata)
        data.setVar("BUILDNAME", workerdata["buildname"])
        data.setVar("DATE", workerdata["date"])
        data.setVar("TIME", workerdata["time"])
        bb.parse.siggen.set_taskdata(workerdata["hashes"], workerdata["hash_deps"], workerdata["sigchecksums"])
        ret = 0
        try:
            the_data = bb.cache.Cache.loadDataFull(fn, appends, data)
            the_data.setVar('BB_TASKHASH', workerdata["runq_hash"][task])

            for h in workerdata["hashes"]:
                the_data.setVar("BBHASH_%s" % h, workerdata["hashes"][h])
            for h in workerdata["hash_deps"]:
                the_data.setVar("BBHASHDEPS_%s" % h, workerdata["hash_deps"][h])

            # exported_vars() returns a generator which *cannot* be passed to os.environ.update()
            # successfully. We also need to unset anything from the environment which shouldn't be there
            exports = bb.data.exported_vars(the_data)
            bb.utils.empty_environment()
            for e, v in exports:
                os.environ[e] = v
            for e in fakeenv:
                os.environ[e] = fakeenv[e]
                the_data.setVar(e, fakeenv[e])
                the_data.setVarFlag(e, 'export', "1")

            if quieterrors:
                the_data.setVarFlag(taskname, "quieterrors", "1")

        except Exception as exc:
            if not quieterrors:
                logger.critical(str(exc))
            os._exit(1)
        try:
            if not cfg.dry_run:
                ret = bb.build.exec_task(fn, taskname, the_data, cfg.profile)
            os._exit(ret)
        except:
            # Any failure in task execution terminates the child with
            # a non-zero status; details were already reported above.
            os._exit(1)
    else:
        # ---- parent process ----
        # Undo the pre-fork environment mutation.
        for key, value in envbackup.iteritems():
            if value is None:
                del os.environ[key]
            else:
                os.environ[key] = value

        return pid, pipein, pipeout
  163. class runQueueWorkerPipe():
  164. """
  165. Abstraction for a pipe between a worker thread and the worker server
  166. """
  167. def __init__(self, pipein, pipeout):
  168. self.input = pipein
  169. if pipeout:
  170. pipeout.close()
  171. bb.utils.nonblockingfd(self.input)
  172. self.queue = ""
  173. def read(self):
  174. start = len(self.queue)
  175. try:
  176. self.queue = self.queue + self.input.read(102400)
  177. except (OSError, IOError) as e:
  178. if e.errno != errno.EAGAIN:
  179. raise
  180. end = len(self.queue)
  181. index = self.queue.find("</event>")
  182. while index != -1:
  183. worker_fire_prepickled(self.queue[:index+8])
  184. self.queue = self.queue[index+8:]
  185. index = self.queue.find("</event>")
  186. return (end > start)
  187. def close(self):
  188. while self.read():
  189. continue
  190. if len(self.queue) > 0:
  191. print("Warning, worker child left partial message: %s" % self.queue)
  192. self.input.close()
  193. normalexit = False
class BitbakeWorker(object):
    """Server-side object of the worker process.

    Reads <item>...</item> framed, pickled commands from the cooker on
    *din* (stdin), forks task children via fork_off_task() and relays
    their events and exit codes back to the cooker.
    """

    def __init__(self, din):
        self.input = din
        bb.utils.nonblockingfd(self.input)
        self.queue = ""
        self.cookercfg = None
        self.databuilder = None
        self.data = None
        self.build_pids = {}   # pid -> task id of running children
        self.build_pipes = {}  # pid -> runQueueWorkerPipe for that child
        signal.signal(signal.SIGTERM, self.sigterm_exception)

    def sigterm_exception(self, signum, stackframe):
        # Terminate any running task children, then re-deliver SIGTERM
        # to ourselves with the default handler so we exit with the
        # conventional signal status.
        bb.warn("Worker recieved SIGTERM, shutting down...")
        self.handle_finishnow(None)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        os.kill(os.getpid(), signal.SIGTERM)

    def serve(self):
        """Main loop: multiplex the cooker command stream and the task
        children's event pipes (1s select timeout)."""
        while True:
            (ready, _, _) = select.select([self.input] + [i.input for i in self.build_pipes.values()], [] , [], 1)
            if self.input in ready or len(self.queue):
                start = len(self.queue)
                try:
                    self.queue = self.queue + self.input.read()
                except (OSError, IOError):
                    pass
                end = len(self.queue)
                # Dispatch every complete command currently buffered.
                self.handle_item("cookerconfig", self.handle_cookercfg)
                self.handle_item("workerdata", self.handle_workerdata)
                self.handle_item("runtask", self.handle_runtask)
                self.handle_item("finishnow", self.handle_finishnow)
                self.handle_item("ping", self.handle_ping)
                self.handle_item("quit", self.handle_quit)

            # Forward child events and reap any exited children.
            for pipe in self.build_pipes:
                self.build_pipes[pipe].read()
            if len(self.build_pids):
                self.process_waitpid()
            worker_flush()

    def handle_item(self, item, func):
        # Commands are framed as <item>payload</item>; invoke func once
        # per complete frame of this type at the head of the queue.
        if self.queue.startswith("<" + item + ">"):
            index = self.queue.find("</" + item + ">")
            while index != -1:
                func(self.queue[(len(item) + 2):index])
                self.queue = self.queue[(index + len(item) + 3):]
                index = self.queue.find("</" + item + ">")

    def handle_cookercfg(self, data):
        # NOTE: pickle data originates from the parent cooker process,
        # which is trusted; never feed untrusted input here.
        self.cookercfg = pickle.loads(data)
        self.databuilder = bb.cookerdata.CookerDataBuilder(self.cookercfg, worker=True)
        self.databuilder.parseBaseConfiguration()
        self.data = self.databuilder.data

    def handle_workerdata(self, data):
        # Per-build metadata: logging configuration, task hashes and
        # the PR service host.
        self.workerdata = pickle.loads(data)
        bb.msg.loggerDefaultDebugLevel = self.workerdata["logdefaultdebug"]
        bb.msg.loggerDefaultVerbose = self.workerdata["logdefaultverbose"]
        bb.msg.loggerVerboseLogs = self.workerdata["logdefaultverboselogs"]
        bb.msg.loggerDefaultDomains = self.workerdata["logdefaultdomain"]
        self.data.setVar("PRSERV_HOST", self.workerdata["prhost"])

    def handle_ping(self, _):
        # Liveness check from the cooker; the reply travels back as a
        # log event.
        workerlog_write("Handling ping\n")
        logger.warn("Pong from bitbake-worker!")

    def handle_quit(self, data):
        workerlog_write("Handling quit\n")
        global normalexit
        normalexit = True
        sys.exit(0)

    def handle_runtask(self, data):
        # Fork a child for the requested task and start tracking it.
        fn, task, taskname, quieterrors, appends, taskdepdata = pickle.loads(data)
        workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))

        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.workerdata, fn, task, taskname, appends, taskdepdata, quieterrors)

        self.build_pids[pid] = task
        self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)

    def process_waitpid(self):
        """
        Return None if there are no processes awaiting result collection,
        otherwise collect the process exit code and close the information
        pipe, reporting the result to the cooker.
        """
        try:
            pid, status = os.waitpid(-1, os.WNOHANG)
            if pid == 0 or os.WIFSTOPPED(status):
                return None
        except OSError:
            return None

        workerlog_write("Exit code of %s for pid %s\n" % (status, pid))

        if os.WIFEXITED(status):
            status = os.WEXITSTATUS(status)
        elif os.WIFSIGNALED(status):
            # Per shell conventions for $?, when a process exits due to
            # a signal, we return an exit code of 128 + SIGNUM
            status = 128 + os.WTERMSIG(status)

        task = self.build_pids[pid]
        del self.build_pids[pid]

        self.build_pipes[pid].close()
        del self.build_pipes[pid]

        worker_fire_prepickled("<exitcode>" + pickle.dumps((task, status)) + "</exitcode>")

    def handle_finishnow(self, _):
        if self.build_pids:
            logger.info("Sending SIGTERM to remaining %s tasks", len(self.build_pids))
            # Kill each child's whole process group (-pid): the children
            # call os.setpgid() after fork, so this also reaches the
            # task's own descendants.
            for k, v in self.build_pids.iteritems():
                try:
                    os.kill(-k, signal.SIGTERM)
                    os.waitpid(-1, 0)
                except:
                    pass
        # Pick up any final events the children managed to send.
        for pipe in self.build_pipes:
            self.build_pipes[pipe].read()
try:
    worker = BitbakeWorker(sys.stdin)
    worker.serve()
except BaseException as e:
    # Report anything other than the deliberate sys.exit() performed by
    # handle_quit() (which sets normalexit first).
    if not normalexit:
        import traceback
        sys.stderr.write(traceback.format_exc())
        sys.stderr.write(str(e))

# Drain any events still queued for the cooker before exiting.
while len(worker_queue):
    worker_flush()

workerlog_write("exitting")
sys.exit(0)