#!/usr/bin/env python3
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Runs the given job in an isolated docker container.

Also streams stdout/err onto the firebase realtime DB.
'''

import fcntl
import logging
import json
import os
import queue
import signal
import socket
import shutil
import subprocess
import sys
import threading
import time

from datetime import datetime, timedelta
from oauth2client.client import GoogleCredentials
from config import DB, SANDBOX_IMG
from common_utils import init_logging, req, ConcurrentModificationError, SCOPES

CUR_DIR = os.path.dirname(__file__)
SCOPES.append('https://www.googleapis.com/auth/firebase.database')
SCOPES.append('https://www.googleapis.com/auth/userinfo.email')


def read_nonblock(fd):
  '''Drains the given file descriptor without blocking, returning a string.'''
  fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
  res = ''
  while True:
    try:
      buf = os.read(fd.fileno(), 1024)
      if not buf:
        break
      res += buf.decode()
    except OSError:
      break
  return res


def log_thread(job_id, log_queue):
  '''Uploads stdout/stderr batches from the queue to the firebase DB.

  Each line is logged as an individual entry in the DB, keyed as follows:
  MMMMMM-NNNN: log line, where M: hex-encoded timestamp, N: monotonic counter
  (e.g. '0000fa-0002' is the third line of a batch read 250 ms after start).
  '''
  uri = '%s/logs/%s.json' % (DB, job_id)
  req('DELETE', uri)
  while True:
    batch = log_queue.get()
    if batch is None:
      break  # EOF
    req('PATCH', uri, body=batch)
  logging.debug('Uploader thread terminated')


def main(argv):
  init_logging()
  if len(argv) != 2:
    print('Usage: %s job_id' % argv[0])
    return 1

  job_id = argv[1]
  res = 42

  # The container name will be worker-N-sandbox.
  container = socket.gethostname() + '-sandbox'

  # Remove stale jobs, if any.
  subprocess.call(['sudo', 'docker', 'rm', '-f', container])

  q = queue.Queue()

  # Unlike in native programs, signal handlers in Python aren't really async:
  # they are queued onto the main thread. Hence we need to keep the main thread
  # responsive to react to signals. This is to handle timeouts and graceful
  # termination of the worker container, which dispatches a SIGTERM on stop.
  def sig_handler(sig, _):
    logging.warning('Job runner got signal %s, terminating job %s', sig, job_id)
    subprocess.call(['sudo', 'docker', 'kill', container])
    os._exit(1)  # sys.exit throws a SystemExit exception, _exit really exits.

  signal.signal(signal.SIGTERM, sig_handler)

  log_thd = threading.Thread(target=log_thread, args=(job_id, q))
  log_thd.start()

  # SYS_PTRACE is required for gtest death tests and LSan.
  cmd = [
      'sudo', 'docker', 'run', '--name', container, '--hostname', container,
      '--cap-add', 'SYS_PTRACE', '--rm', '--env',
      'PERFETTO_TEST_JOB=%s' % job_id, '--tmpfs', '/tmp:exec'
  ]

  # Propagate environment variables coming from the job config.
  for kv in [kv for kv in os.environ.items() if kv[0].startswith('PERFETTO_')]:
    cmd += ['--env', '%s=%s' % kv]

  # We use the tmpfs mount created by gce-startup-script.sh, if present. The
  # problem is that Docker doesn't allow both overriding the tmpfs-size and
  # preventing the "-o noexec" mount option. In turn, the default tmpfs-size
  # depends on the host's physical memory size.
  if os.getenv('SANDBOX_TMP'):
    cmd += ['-v', '%s:/ci/ramdisk' % os.getenv('SANDBOX_TMP')]
  else:
    cmd += ['--tmpfs', '/ci/ramdisk:exec']

  # Rationale for the conditional branches below: when running in the real GCE
  # environment, gce-startup-script.sh mounts these directories in the right
  # locations, so that they are shared between all workers.
  # When running the worker container outside of GCE (i.e. for local testing)
  # we leave these empty. The VOLUME directive in the dockerfile will cause
  # docker to automatically mount a scratch volume for those.
  # This is so that the CI containers can be tested without having to do the
  # work that gce-startup-script.sh does.
  if os.getenv('SHARED_WORKER_CACHE'):
    cmd += ['--volume=%s:/ci/cache' % os.getenv('SHARED_WORKER_CACHE')]

  artifacts_dir = None
  if os.getenv('ARTIFACTS_DIR'):
    artifacts_dir = os.path.join(os.getenv('ARTIFACTS_DIR'), job_id)
    subprocess.call(['sudo', 'rm', '-rf', artifacts_dir])
    os.mkdir(artifacts_dir)
    cmd += ['--volume=%s:/ci/artifacts' % artifacts_dir]

  cmd += os.getenv('SANDBOX_NETWORK_ARGS', '').split()
  cmd += [SANDBOX_IMG]

  logging.info('Starting %s', ' '.join(cmd))
  proc = subprocess.Popen(
      cmd,
      stdin=open(os.devnull),
      stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT,
      bufsize=65536)
  stdout = ''
  tstart = time.time()
  while True:
    ms_elapsed = int((time.time() - tstart) * 1000)
    stdout += read_nonblock(proc.stdout)

    # stdout/err pipes are not atomic w.r.t. '\n'. Extract whole lines out into
    # |lines| and keep the last partial line (-1) in the |stdout| buffer.
    lines = stdout.split('\n')
    stdout = lines[-1]
    lines = lines[:-1]

    # Each line has a key of the form <time-from-start>-<counter>.
    # |counter| is relative to the batch and is only used to disambiguate lines
    # fetched at the same time, preserving the ordering.
    batch = {}
    for counter, line in enumerate(lines):
      batch['%06x-%04x' % (ms_elapsed, counter)] = line
    if batch:
      q.put(batch)
    if proc.poll() is not None:
      res = proc.returncode
      logging.info('Job subprocess terminated with code %s', res)
      break

    # Large sleeps favour batching in the log uploader.
    # Small sleeps favour responsiveness of the signal handler.
    time.sleep(1)

  q.put(None)  # EOF marker
  log_thd.join()

  if artifacts_dir:
    artifacts_uploader = os.path.join(CUR_DIR, 'artifacts_uploader.py')
    cmd = ['setsid', artifacts_uploader, '--job-id=%s' % job_id, '--rm']
    subprocess.call(cmd)

  return res


if __name__ == '__main__':
  sys.exit(main(sys.argv))
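
# Example invocation (a sketch: the script name, job id and mount paths below
# are illustrative, not defined by this file). On a real GCE worker the env
# vars are exported by gce-startup-script.sh; locally they can be omitted, in
# which case docker falls back to scratch volumes as described above:
#
#   SANDBOX_TMP=/mnt/disks/sandbox-tmp \
#   SHARED_WORKER_CACHE=/mnt/disks/shared-cache \
#   ARTIFACTS_DIR=/mnt/disks/artifacts \
#   ./run_job.py 20190712-143000-42
#
# The runner assumes passwordless sudo for the docker calls and that the
# config and common_utils modules are importable next to this file.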