summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Purdie <richard.purdie@linuxfoundation.org>2015-09-29 09:28:24 +0100
committerRichard Purdie <richard.purdie@linuxfoundation.org>2015-09-29 14:11:13 +0100
commit3cb55bdf06148943960e438291f9a562857340a3 (patch)
treece15f58c97bc70795ead9197f9a735036a5c8159
parent139e851831eea682aba0f9403dcc2eea7c1b05bd (diff)
downloadbitbake-3cb55bdf06148943960e438291f9a562857340a3.zip
bitbake-worker: Guard against multiprocessing corruption of event data
In the forked child, we may use multiprocessing. There is only one event pipe to the worker controlling process and if we're unlucky, multiple processes can write to it at once corrupting the data by intermixing it. We don't see this often but when we do, it's quite puzzling. I suspect it only happens in tasks which use multiprocessing (do_rootfs, do_package) and is much more likely to happen when we have long messages, usually many times PAGE_SIZE since PAGE_SIZE writes are atomic. This makes it much more likely within do_rootfs, when for example a subprocess lists the contents of a rootfs. To fix this, we give each child a Lock() object and use this to serialise writes to the controlling worker. Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
-rwxr-xr-xbin/bitbake-worker9
1 files changed, 9 insertions, 0 deletions
diff --git a/bin/bitbake-worker b/bin/bitbake-worker
index af17b874..3390f637 100755
--- a/bin/bitbake-worker
+++ b/bin/bitbake-worker
@@ -10,6 +10,7 @@ import bb
import select
import errno
import signal
+from multiprocessing import Lock
# Users shouldn't be running this code directly
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
@@ -44,6 +45,9 @@ except ImportError:
worker_pipe = sys.stdout.fileno()
bb.utils.nonblockingfd(worker_pipe)
+# Need to guard against multiprocessing being used in child processes
+# and multiple processes trying to write to the parent at the same time
+worker_pipe_lock = None
handler = bb.event.LogHandler()
logger.addHandler(handler)
@@ -85,10 +89,13 @@ def worker_flush():
def worker_child_fire(event, d):
global worker_pipe
+ global worker_pipe_lock
data = "<event>" + pickle.dumps(event) + "</event>"
try:
+ worker_pipe_lock.acquire()
worker_pipe.write(data)
+ worker_pipe_lock.release()
except IOError:
sigterm_handler(None, None)
raise
@@ -157,6 +164,7 @@ def fork_off_task(cfg, data, workerdata, fn, task, taskname, appends, taskdepdat
if pid == 0:
def child():
global worker_pipe
+ global worker_pipe_lock
pipein.close()
signal.signal(signal.SIGTERM, sigterm_handler)
@@ -169,6 +177,7 @@ def fork_off_task(cfg, data, workerdata, fn, task, taskname, appends, taskdepdat
bb.event.worker_pid = os.getpid()
bb.event.worker_fire = worker_child_fire
worker_pipe = pipeout
+ worker_pipe_lock = Lock()
# Make the child the process group leader and ensure no
# child process will be controlled by the current terminal