1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/test/launcher/test_launcher.h"
6
7 #include <stdio.h>
8
9 #include <algorithm>
10 #include <map>
11 #include <random>
12 #include <string_view>
13 #include <unordered_map>
14 #include <unordered_set>
15 #include <utility>
16
17 #include "base/at_exit.h"
18 #include "base/clang_profiling_buildflags.h"
19 #include "base/command_line.h"
20 #include "base/containers/adapters.h"
21 #include "base/containers/contains.h"
22 #include "base/environment.h"
23 #include "base/files/file_enumerator.h"
24 #include "base/files/file_path.h"
25 #include "base/files/file_util.h"
26 #include "base/files/scoped_file.h"
27 #include "base/files/scoped_temp_dir.h"
28 #include "base/format_macros.h"
29 #include "base/functional/bind.h"
30 #include "base/hash/hash.h"
31 #include "base/lazy_instance.h"
32 #include "base/location.h"
33 #include "base/logging.h"
34 #include "base/memory/ptr_util.h"
35 #include "base/memory/raw_ptr.h"
36 #include "base/memory/raw_ref.h"
37 #include "base/numerics/safe_conversions.h"
38 #include "base/process/kill.h"
39 #include "base/process/launch.h"
40 #include "base/ranges/algorithm.h"
41 #include "base/run_loop.h"
42 #include "base/strings/pattern.h"
43 #include "base/strings/strcat.h"
44 #include "base/strings/string_number_conversions.h"
45 #include "base/strings/string_piece.h"
46 #include "base/strings/string_split.h"
47 #include "base/strings/string_util.h"
48 #include "base/strings/stringize_macros.h"
49 #include "base/strings/stringprintf.h"
50 #include "base/strings/utf_string_conversions.h"
51 #include "base/system/sys_info.h"
52 #include "base/task/post_job.h"
53 #include "base/task/single_thread_task_runner.h"
54 #include "base/task/thread_pool.h"
55 #include "base/task/thread_pool/thread_pool_instance.h"
56 #include "base/test/gtest_util.h"
57 #include "base/test/gtest_xml_util.h"
58 #include "base/test/launcher/test_launcher_tracer.h"
59 #include "base/test/launcher/test_results_tracker.h"
60 #include "base/test/scoped_logging_settings.h"
61 #include "base/test/test_file_util.h"
62 #include "base/test/test_switches.h"
63 #include "base/test/test_timeouts.h"
64 #include "base/threading/platform_thread.h"
65 #include "base/threading/thread_restrictions.h"
66 #include "base/time/time.h"
67 #include "build/build_config.h"
68 #include "build/chromeos_buildflags.h"
69 #include "testing/gtest/include/gtest/gtest.h"
70
71 #if BUILDFLAG(IS_POSIX)
72 #include <fcntl.h>
73
74 #include "base/files/file_descriptor_watcher_posix.h"
75 #endif
76
77 #if BUILDFLAG(IS_APPLE)
78 #include "base/apple/scoped_nsautorelease_pool.h"
79 #endif
80
81 #if BUILDFLAG(IS_WIN)
82 #include "base/strings/string_util_win.h"
83
84 #include <windows.h>
85
86 // To avoid conflicts with the macro from the Windows SDK...
87 #undef GetCommandLine
88 #endif
89
90 #if BUILDFLAG(IS_FUCHSIA)
91 #include <lib/fdio/namespace.h>
92 #include <lib/zx/job.h>
93 #include <lib/zx/time.h>
94 #include "base/atomic_sequence_num.h"
95 #include "base/fuchsia/default_job.h"
96 #include "base/fuchsia/file_utils.h"
97 #include "base/fuchsia/fuchsia_logging.h"
98 #endif
99
100 #if BUILDFLAG(IS_IOS)
101 #include "base/path_service.h"
102 #endif
103
104 namespace base {
105
106 // See
107 // https://groups.google.com/a/chromium.org/d/msg/chromium-dev/nkdTP7sstSc/uT3FaE_sgkAJ
108 using ::operator<<;
109
110 // The environment variable name for the total number of test shards.
111 const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
112 // The environment variable name for the test shard index.
113 const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
114
115 // Prefix indicating the test has to run prior to its corresponding main test.
116 const char kPreTestPrefix[] = "PRE_";
117
118 // Prefix indicating test is disabled, will not run unless specified.
119 const char kDisabledTestPrefix[] = "DISABLED_";
120
121 ResultWatcher::ResultWatcher(FilePath result_file, size_t num_tests)
122 : result_file_(std::move(result_file)), num_tests_(num_tests) {}
123
124 bool ResultWatcher::PollUntilDone(TimeDelta timeout_per_test) {
125 CHECK(timeout_per_test.is_positive());
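// The batch as a whole is allotted |num_tests_| * |timeout_per_test|;
// PollOnce() shortens each subsequent wait based on how recently a test
// last completed.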
126 TimeTicks batch_deadline = TimeTicks::Now() + num_tests_ * timeout_per_test;
127 TimeDelta time_to_next_check = timeout_per_test;
128 do {
129 if (WaitWithTimeout(time_to_next_check)) {
130 return true;
131 }
132 time_to_next_check = PollOnce(timeout_per_test);
133 } while (TimeTicks::Now() < batch_deadline &&
134 time_to_next_check.is_positive());
135 // The process may have exited or is about to exit. Give the process a grace
136 // period to exit on its own.
137 return WaitWithTimeout(TestTimeouts::tiny_timeout());
138 }
139
140 TimeDelta ResultWatcher::PollOnce(TimeDelta timeout_per_test) {
141 std::vector<TestResult> test_results;
142 // If the result watcher is unlucky enough to read the results while the
143 // runner process is writing an update, it is possible to read an incomplete
144 // XML entry, in which case `ProcessGTestOutput` will return false.
145 if (!ProcessGTestOutput(result_file_, &test_results, nullptr)) {
146 return TestTimeouts::tiny_timeout();
147 }
148 Time latest_completion = LatestCompletionTimestamp(test_results);
149 // Didn't complete a single test before timeout, fail.
150 if (latest_completion.is_null()) {
151 return TimeDelta();
152 }
153 // The gtest result writer gets timestamps from `Time::Now`.
154 TimeDelta time_since_latest_completion = Time::Now() - latest_completion;
155 // This heuristic attempts to prevent unrelated clock changes between the
156 // latest write and read from being falsely identified as a test timeout.
157 // For example, daylight savings time starting or ending can add an
158 // artificial delta of +1 or -1 hour to `time_since_latest_completion`.
159 if (time_since_latest_completion.is_negative() ||
160 time_since_latest_completion > kDaylightSavingsThreshold) {
161 return timeout_per_test;
162 }
163 // Expect another test to complete no later than `timeout_per_test` after
164 // the latest completion.
165 return timeout_per_test - time_since_latest_completion;
166 }
167
168 Time ResultWatcher::LatestCompletionTimestamp(
169 const std::vector<TestResult>& test_results) {
170 CHECK_LE(test_results.size(), num_tests_);
171 // Since the result file is append-only, timestamps should already be in
172 // ascending order.
173 for (const TestResult& result : Reversed(test_results)) {
174 if (result.completed()) {
175 Time test_start = result.timestamp.value_or(Time());
176 return test_start + result.elapsed_time;
177 }
178 }
179 return Time();
180 }
181
182 // Watch results generated by a child test process. Wait for the child process
183 // to exit between result checks.
184 class ProcessResultWatcher : public ResultWatcher {
185 public:
186 ProcessResultWatcher(FilePath result_file, size_t num_tests, Process& process)
187 : ResultWatcher(result_file, num_tests), process_(process) {}
188
189 // Get the exit code of the process, or -1 if the process has not exited yet.
190 int GetExitCode();
191
192 bool WaitWithTimeout(TimeDelta timeout) override;
193
194 private:
195 const raw_ref<Process> process_;
196 int exit_code_ = -1;
197 };
198
199 int ProcessResultWatcher::GetExitCode() {
200 return exit_code_;
201 }
202
203 bool ProcessResultWatcher::WaitWithTimeout(TimeDelta timeout) {
204 return process_->WaitForExitWithTimeout(timeout, &exit_code_);
205 }
206
207 namespace {
208
209 // Global tag for test runs where the results are unreliable for any reason.
210 const char kUnreliableResultsTag[] = "UNRELIABLE_RESULTS";
211
212 // Maximum time of no output after which we print list of processes still
213 // running. This deliberately doesn't use TestTimeouts (which is otherwise
214 // a recommended solution), because they can be increased. This would defeat
215 // the purpose of this timeout, which is 1) to avoid buildbot "no output for
216 // X seconds" timeout killing the process 2) help communicate status of
217 // the test launcher to people looking at the output (no output for a long
218 // time is mysterious and gives no info about what is happening) 3) help
219 // debugging in case the process hangs anyway.
220 constexpr TimeDelta kOutputTimeout = Seconds(15);
221
222 // Limit of output snippet lines when printing to stdout.
223 // Avoids flooding the logs with an amount of output that gums up
224 // the infrastructure.
225 const size_t kOutputSnippetLinesLimit = 5000;
226
227 // Limit of output snippet size. Exceeding this limit
228 // results in truncating the output and failing the test.
229 const size_t kOutputSnippetBytesLimit = 300 * 1024;
230
231 // Limit of seed values for gtest shuffling. Arbitrary, but based on
232 // gtest's similarly arbitrary choice.
233 const uint32_t kRandomSeedUpperBound = 100000;
234
235 // Set of live launch test processes with corresponding lock (it is allowed
236 // for callers to launch processes on different threads).
237 Lock* GetLiveProcessesLock() {
238 static auto* lock = new Lock;
239 return lock;
240 }
241
242 std::map<ProcessHandle, CommandLine>* GetLiveProcesses() {
243 static auto* map = new std::map<ProcessHandle, CommandLine>;
244 return map;
245 }
246
247 // Performance trace generator.
248 TestLauncherTracer* GetTestLauncherTracer() {
249 static auto* tracer = new TestLauncherTracer;
250 return tracer;
251 }
252
253 #if BUILDFLAG(IS_FUCHSIA)
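// Waits until |job| reports that it has no running processes or child jobs,
// so that leaked test sub-processes can be detected before the job is killed.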
254 zx_status_t WaitForJobExit(const zx::job& job) {
255 zx::time deadline =
256 zx::deadline_after(zx::duration(kOutputTimeout.ToZxDuration()));
257 zx_signals_t to_wait_for = ZX_JOB_NO_JOBS | ZX_JOB_NO_PROCESSES;
258 while (to_wait_for) {
259 zx_signals_t observed = 0;
260 zx_status_t status = job.wait_one(to_wait_for, deadline, &observed);
261 if (status != ZX_OK)
262 return status;
263 to_wait_for &= ~observed;
264 }
265 return ZX_OK;
266 }
267 #endif // BUILDFLAG(IS_FUCHSIA)
268
269 #if BUILDFLAG(IS_POSIX)
270 // Self-pipe that makes it possible to do complex shutdown handling
271 // outside of the signal handler.
272 int g_shutdown_pipe[2] = { -1, -1 };
273
274 void ShutdownPipeSignalHandler(int signal) {
275 HANDLE_EINTR(write(g_shutdown_pipe[1], "q", 1));
276 }
277
278 void KillSpawnedTestProcesses() {
279 // Keep the lock until exiting the process to prevent further processes
280 // from being spawned.
281 AutoLock lock(*GetLiveProcessesLock());
282
283 fprintf(stdout, "Sending SIGTERM to %zu child processes... ",
284 GetLiveProcesses()->size());
285 fflush(stdout);
286
287 for (const auto& pair : *GetLiveProcesses()) {
288 // Send the signal to entire process group.
289 kill((-1) * (pair.first), SIGTERM);
290 }
291
292 fprintf(stdout,
293 "done.\nGiving processes a chance to terminate cleanly... ");
294 fflush(stdout);
295
296 PlatformThread::Sleep(Milliseconds(500));
297
298 fprintf(stdout, "done.\n");
299 fflush(stdout);
300
301 fprintf(stdout, "Sending SIGKILL to %zu child processes... ",
302 GetLiveProcesses()->size());
303 fflush(stdout);
304
305 for (const auto& pair : *GetLiveProcesses()) {
306 // Send the signal to entire process group.
307 kill((-1) * (pair.first), SIGKILL);
308 }
309
310 fprintf(stdout, "done.\n");
311 fflush(stdout);
312 }
313 #endif // BUILDFLAG(IS_POSIX)
314
315 // Parses the environment variable var as an Int32. If it is unset, returns
316 // true. If it is set, unsets it then converts it to Int32 before
317 // returning it in |result|. Returns true on success.
318 bool TakeInt32FromEnvironment(const char* const var, int32_t* result) {
319 std::unique_ptr<Environment> env(Environment::Create());
320 std::string str_val;
321
322 if (!env->GetVar(var, &str_val))
323 return true;
324
325 if (!env->UnSetVar(var)) {
326 LOG(ERROR) << "Invalid environment: we could not unset " << var << ".\n";
327 return false;
328 }
329
330 if (!StringToInt(str_val, result)) {
331 LOG(ERROR) << "Invalid environment: " << var << " is not an integer.\n";
332 return false;
333 }
334
335 return true;
336 }
337
338 // Unsets the environment variable |name| and returns true on success.
339 // Also returns true if the variable just doesn't exist.
340 bool UnsetEnvironmentVariableIfExists(const std::string& name) {
341 std::unique_ptr<Environment> env(Environment::Create());
342 std::string str_val;
343 if (!env->GetVar(name, &str_val))
344 return true;
345 return env->UnSetVar(name);
346 }
347
348 // Returns true if bot mode has been requested, i.e. defaults optimized
349 // for continuous integration bots. This way developers don't have to remember
350 // special command-line flags.
351 bool BotModeEnabled(const CommandLine* command_line) {
352 std::unique_ptr<Environment> env(Environment::Create());
353 return command_line->HasSwitch(switches::kTestLauncherBotMode) ||
354 env->HasVar("CHROMIUM_TEST_LAUNCHER_BOT_MODE");
355 }
356
357 // Returns the command line after gtest-specific processing
358 // and applying |wrapper|.
359 CommandLine PrepareCommandLineForGTest(const CommandLine& command_line,
360 const std::string& wrapper,
361 const size_t retries_left) {
362 CommandLine new_command_line(command_line.GetProgram());
363 CommandLine::SwitchMap switches = command_line.GetSwitches();
364
365 // Handled by the launcher process.
366 switches.erase(kGTestRepeatFlag);
367 switches.erase(kIsolatedScriptTestRepeatFlag);
368
369 // Don't try to write the final XML report in child processes.
370 switches.erase(kGTestOutputFlag);
371
372 #if BUILDFLAG(IS_IOS)
373 // We only need the xctest flag for the parent process. Passing it to
374 // child processes will cause the tests not to run, so remove it.
375 switches.erase(switches::kEnableRunIOSUnittestsWithXCTest);
376 #endif
377
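// Pass the number of retries remaining down to the child process, unless the
// caller already set the switch explicitly.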
378 if (switches.find(switches::kTestLauncherRetriesLeft) == switches.end()) {
379 switches[switches::kTestLauncherRetriesLeft] =
380 #if BUILDFLAG(IS_WIN)
381 base::NumberToWString(
382 #else
383 base::NumberToString(
384 #endif
385 retries_left);
386 }
387
388 for (CommandLine::SwitchMap::const_iterator iter = switches.begin();
389 iter != switches.end(); ++iter) {
390 new_command_line.AppendSwitchNative((*iter).first, (*iter).second);
391 }
392
393 // Prepend wrapper after last CommandLine quasi-copy operation. CommandLine
394 // does not really support removing switches well, and trying to do that
395 // on a CommandLine with a wrapper is known to break.
396 // TODO(phajdan.jr): Give it a try to support CommandLine removing switches.
397 #if BUILDFLAG(IS_WIN)
398 new_command_line.PrependWrapper(UTF8ToWide(wrapper));
399 #else
400 new_command_line.PrependWrapper(wrapper);
401 #endif
402
403 return new_command_line;
404 }
405
406 // Launches a child process using |command_line|. If a test is still running
407 // after |timeout|, the child process is terminated and |*was_timeout| is set to
408 // true. Returns exit code of the process.
409 int LaunchChildTestProcessWithOptions(const CommandLine& command_line,
410 const LaunchOptions& options,
411 int flags,
412 const FilePath& result_file,
413 TimeDelta timeout_per_test,
414 size_t num_tests,
415 TestLauncherDelegate* delegate,
416 bool* was_timeout) {
417 #if BUILDFLAG(IS_POSIX)
418 // Make sure an option we rely on is present - see LaunchChildGTestProcess.
419 DCHECK(options.new_process_group);
420 #endif
421
422 LaunchOptions new_options(options);
423
424 #if BUILDFLAG(IS_WIN)
425 DCHECK(!new_options.job_handle);
426
427 win::ScopedHandle job_handle;
428 if (flags & TestLauncher::USE_JOB_OBJECTS) {
429 job_handle.Set(CreateJobObject(NULL, NULL));
430 if (!job_handle.is_valid()) {
431 LOG(ERROR) << "Could not create JobObject.";
432 return -1;
433 }
434
435 DWORD job_flags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
436
437 if (!SetJobObjectLimitFlags(job_handle.get(), job_flags)) {
438 LOG(ERROR) << "Could not SetJobObjectLimitFlags.";
439 return -1;
440 }
441
442 new_options.job_handle = job_handle.get();
443 }
444 #elif BUILDFLAG(IS_FUCHSIA)
445 DCHECK(!new_options.job_handle);
446
447 // Set the clone policy, deliberately omitting FDIO_SPAWN_CLONE_NAMESPACE so
448 // that we can install a different /data.
449 new_options.spawn_flags = FDIO_SPAWN_CLONE_STDIO | FDIO_SPAWN_CLONE_JOB;
450
451 const base::FilePath kDataPath(base::kPersistedDataDirectoryPath);
452 const base::FilePath kCachePath(base::kPersistedCacheDirectoryPath);
453
454 // Clone all namespace entries from the current process, except /data and
455 // /cache, which are overridden below.
456 fdio_flat_namespace_t* flat_namespace = nullptr;
457 zx_status_t result = fdio_ns_export_root(&flat_namespace);
458 ZX_CHECK(ZX_OK == result, result) << "fdio_ns_export_root";
459 for (size_t i = 0; i < flat_namespace->count; ++i) {
460 base::FilePath path(flat_namespace->path[i]);
461 if (path == kDataPath || path == kCachePath) {
462 result = zx_handle_close(flat_namespace->handle[i]);
463 ZX_CHECK(ZX_OK == result, result) << "zx_handle_close";
464 } else {
465 new_options.paths_to_transfer.push_back(
466 {path, flat_namespace->handle[i]});
467 }
468 }
469 free(flat_namespace);
470
471 zx::job job_handle;
472 result = zx::job::create(*GetDefaultJob(), 0, &job_handle);
473 ZX_CHECK(ZX_OK == result, result) << "zx_job_create";
474 new_options.job_handle = job_handle.get();
475
476 // Give this test its own isolated /data directory by creating a new temporary
477 // subdirectory under data (/data/test-$PID) and binding paths under that to
478 // /data and /cache in the child process.
479 // Persistent data storage is mapped to /cache rather than system-provided
480 // cache storage, to avoid unexpected purges (see crbug.com/1242170).
481 CHECK(base::PathExists(kDataPath));
482
483 // Create the test subdirectory with a name that is unique to the child test
484 // process (qualified by parent PID and an autoincrementing test process
485 // index).
486 static base::AtomicSequenceNumber child_launch_index;
487 const base::FilePath child_data_path = kDataPath.AppendASCII(
488 base::StringPrintf("test-%zu-%d", base::Process::Current().Pid(),
489 child_launch_index.GetNext()));
490 CHECK(!base::DirectoryExists(child_data_path));
491 CHECK(base::CreateDirectory(child_data_path));
492 DCHECK(base::DirectoryExists(child_data_path));
493
494 const base::FilePath test_data_dir(child_data_path.AppendASCII("data"));
495 CHECK(base::CreateDirectory(test_data_dir));
496 const base::FilePath test_cache_dir(child_data_path.AppendASCII("cache"));
497 CHECK(base::CreateDirectory(test_cache_dir));
498
499 // Transfer handles to the new directories as /data and /cache in the child
500 // process' namespace.
501 new_options.paths_to_transfer.push_back(
502 {kDataPath,
503 base::OpenDirectoryHandle(test_data_dir).TakeChannel().release()});
504 new_options.paths_to_transfer.push_back(
505 {kCachePath,
506 base::OpenDirectoryHandle(test_cache_dir).TakeChannel().release()});
507 #endif // BUILDFLAG(IS_FUCHSIA)
508
509 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
510 // To prevent accidental privilege sharing to an untrusted child, processes
511 // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
512 // new child will be privileged and trusted.
513 new_options.allow_new_privs = true;
514 #endif
515
516 Process process;
517
518 {
519 // Note how we grab the lock before the process possibly gets created.
520 // This ensures that when the lock is held, ALL the processes are registered
521 // in the set.
522 AutoLock lock(*GetLiveProcessesLock());
523
524 #if BUILDFLAG(IS_WIN)
525 // Allow the handle used to capture stdout and stderr to be inherited by the
526 // child. Note that this is done under GetLiveProcessesLock() to ensure that
527 // only the desired child receives the handle.
528 if (new_options.stdout_handle) {
529 ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT,
530 HANDLE_FLAG_INHERIT);
531 }
532 #endif
533
534 process = LaunchProcess(command_line, new_options);
535
536 #if BUILDFLAG(IS_WIN)
537 // Revoke inheritance so that the handle isn't leaked into other children.
538 // Note that this is done under GetLiveProcessesLock() to ensure that only
539 // the desired child receives the handle.
540 if (new_options.stdout_handle)
541 ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT, 0);
542 #endif
543
544 if (!process.IsValid())
545 return -1;
546
547 // TODO(rvargas) crbug.com/417532: Don't store process handles.
548 GetLiveProcesses()->insert(std::make_pair(process.Handle(), command_line));
549 }
550
551 int exit_code = 0;
552 bool did_exit = false;
553
554 {
555 base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
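// A single test gets a plain wait with the per-test timeout; a batch is
// polled via ProcessResultWatcher so the timeout is applied per completed
// test rather than to the whole batch at once.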
556 if (num_tests == 1) {
557 did_exit = process.WaitForExitWithTimeout(timeout_per_test, &exit_code);
558 } else {
559 ProcessResultWatcher result_watcher(result_file, num_tests, process);
560 did_exit = result_watcher.PollUntilDone(timeout_per_test);
561 exit_code = result_watcher.GetExitCode();
562 }
563 }
564
565 if (!did_exit) {
566 if (delegate)
567 delegate->OnTestTimedOut(command_line);
568
569 *was_timeout = true;
570 exit_code = -1; // Set a non-zero exit code to signal a failure.
571
572 {
573 base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
574 // Ensure that the process terminates.
575 process.Terminate(-1, true);
576 }
577 }
578
579 #if BUILDFLAG(IS_FUCHSIA)
580 zx_status_t wait_status = WaitForJobExit(job_handle);
581 if (wait_status != ZX_OK) {
582 LOG(ERROR) << "Batch leaked jobs or processes.";
583 exit_code = -1;
584 }
585 #endif // BUILDFLAG(IS_FUCHSIA)
586
587 {
588 // Note how we grab the lock before issuing a possibly broad process kill.
589 // Other code parts that grab the lock kill processes, so avoid trying
590 // to do that twice and trigger all kinds of log messages.
591 AutoLock lock(*GetLiveProcessesLock());
592
593 #if BUILDFLAG(IS_FUCHSIA)
594 zx_status_t status = job_handle.kill();
595 ZX_CHECK(status == ZX_OK, status);
596
597 // Cleanup the data directory.
598 CHECK(DeletePathRecursively(child_data_path));
599 #elif BUILDFLAG(IS_POSIX)
600 // It is not possible to waitpid() on any leaked sub-processes of the test
601 // batch process, since those are not direct children of this process.
602 // kill()ing the process-group will return a result indicating whether the
603 // group was found (i.e. processes were still running in it) or not (i.e.
604 // sub-processes had exited already). Unfortunately many tests (e.g. browser
605 // tests) have processes exit asynchronously, so checking the kill() result
606 // will report false failures.
607 // Unconditionally kill the process group, regardless of the batch exit-code
608 // until a better solution is available.
609 kill(-1 * process.Handle(), SIGKILL);
610 #endif // BUILDFLAG(IS_POSIX)
611
612 GetLiveProcesses()->erase(process.Handle());
613 }
614
615 return exit_code;
616 }
617
618 struct ChildProcessResults {
619 // Total time for DoLaunchChildTestProcess to execute.
620 TimeDelta elapsed_time;
621 // If stdio is redirected, pass output file content.
622 std::string output_file_contents;
623 // True if child process timed out.
624 bool was_timeout = false;
625 // Exit code of child process.
626 int exit_code;
627 // Thread ID of the runner.
628 PlatformThreadId thread_id;
629 // The sequence number of the child test process executed.
630 // It's used instead of the process id to distinguish processes, since the
631 // OS might reuse process ids.
632 int process_num;
633 };
634
635 // Returns the path to a temporary directory within |task_temp_dir| for the
636 // child process of index |child_index|, or an empty FilePath if per-child temp
637 // dirs are not supported.
638 FilePath CreateChildTempDirIfSupported(const FilePath& task_temp_dir,
639 int child_index) {
640 if (!TestLauncher::SupportsPerChildTempDirs())
641 return FilePath();
642 FilePath child_temp = task_temp_dir.AppendASCII(NumberToString(child_index));
643 CHECK(CreateDirectoryAndGetError(child_temp, nullptr));
644 return child_temp;
645 }
646
647 // Adds the platform-specific variable setting |temp_dir| as a process's
648 // temporary directory to |environment|.
649 void SetTemporaryDirectory(const FilePath& temp_dir,
650 EnvironmentMap* environment) {
651 #if BUILDFLAG(IS_WIN)
652 environment->emplace(L"TMP", temp_dir.value());
653 #elif BUILDFLAG(IS_APPLE)
654 environment->emplace("MAC_CHROMIUM_TMPDIR", temp_dir.value());
655 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
656 environment->emplace("TMPDIR", temp_dir.value());
657 #endif
658 }
659
660 // This launches the child test process, waits for it to complete,
661 // and returns child process results.
662 ChildProcessResults DoLaunchChildTestProcess(
663 const CommandLine& command_line,
664 const FilePath& process_temp_dir,
665 const FilePath& result_file,
666 TimeDelta timeout_per_test,
667 size_t num_tests,
668 const TestLauncher::LaunchOptions& test_launch_options,
669 bool redirect_stdio,
670 TestLauncherDelegate* delegate) {
671 TimeTicks start_time = TimeTicks::Now();
672
673 ChildProcessResults result;
674 result.thread_id = PlatformThread::CurrentId();
675
676 ScopedFILE output_file;
677 FilePath output_filename;
678 if (redirect_stdio) {
679 output_file = CreateAndOpenTemporaryStream(&output_filename);
680 CHECK(output_file);
681 #if BUILDFLAG(IS_WIN)
682 // Mark the file so that it will be deleted when all handles are closed.
683 if (!FILEToFile(output_file.get()).DeleteOnClose(true)) {
684 PLOG(WARNING) << "Failed to mark " << output_filename.AsUTF8Unsafe()
685 << " for deletion on close";
686 }
687 #endif
688 }
689
690 LaunchOptions options;
691
692 // Tell the child process to use its designated temporary directory.
693 if (!process_temp_dir.empty())
694 SetTemporaryDirectory(process_temp_dir, &options.environment);
695 #if BUILDFLAG(IS_WIN)
696
697 options.inherit_mode = test_launch_options.inherit_mode;
698 options.handles_to_inherit = test_launch_options.handles_to_inherit;
699 if (redirect_stdio) {
700 HANDLE handle =
701 reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(output_file.get())));
702 CHECK_NE(INVALID_HANDLE_VALUE, handle);
703 options.stdin_handle = INVALID_HANDLE_VALUE;
704 options.stdout_handle = handle;
705 options.stderr_handle = handle;
706 // See LaunchOptions.stdout_handle comments for why this compares against
707 // FILE_TYPE_CHAR.
708 if (options.inherit_mode == base::LaunchOptions::Inherit::kSpecific &&
709 GetFileType(handle) != FILE_TYPE_CHAR) {
710 options.handles_to_inherit.push_back(handle);
711 }
712 }
713
714 #else // if !BUILDFLAG(IS_WIN)
715
716 options.fds_to_remap = test_launch_options.fds_to_remap;
717 if (redirect_stdio) {
718 int output_file_fd = fileno(output_file.get());
719 CHECK_LE(0, output_file_fd);
720 options.fds_to_remap.push_back(
721 std::make_pair(output_file_fd, STDOUT_FILENO));
722 options.fds_to_remap.push_back(
723 std::make_pair(output_file_fd, STDERR_FILENO));
724 }
725
726 #if !BUILDFLAG(IS_FUCHSIA)
727 options.new_process_group = true;
728 #endif
729 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
730 options.kill_on_parent_death = true;
731 #endif
732
733 #endif // !BUILDFLAG(IS_WIN)
734
735 result.exit_code = LaunchChildTestProcessWithOptions(
736 command_line, options, test_launch_options.flags, result_file,
737 timeout_per_test, num_tests, delegate, &result.was_timeout);
738
739 if (redirect_stdio) {
740 fflush(output_file.get());
741
742 // Reading the file can sometimes fail when the process was killed midflight
743 // (e.g. on test suite timeout): https://crbug.com/826408. Attempt to read
744 // the output file anyways, but do not crash on failure in this case.
745 CHECK(ReadStreamToString(output_file.get(), &result.output_file_contents) ||
746 result.exit_code != 0);
747
748 output_file.reset();
749 #if !BUILDFLAG(IS_WIN)
750 // On Windows, the reset() above is enough to delete the file since it was
751 // marked for deletion on close after being opened. Other platforms require
752 // an explicit delete now.
753 if (!DeleteFile(output_filename))
754 LOG(WARNING) << "Failed to delete " << output_filename.AsUTF8Unsafe();
755 #endif
756 }
757 result.elapsed_time = TimeTicks::Now() - start_time;
758 result.process_num = GetTestLauncherTracer()->RecordProcessExecution(
759 start_time, result.elapsed_time);
760 return result;
761 }
762
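// Splits a test filter into individual test names. "::" is used as the
// separator when supported (and actually present); otherwise ":" is used.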
763 std::vector<std::string> ExtractTestsFromFilter(const std::string& filter,
764 bool double_colon_supported) {
765 std::vector<std::string> tests;
766 if (double_colon_supported) {
767 tests =
768 SplitString(filter, "::", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
769 }
770 if (tests.size() <= 1) {
771 tests =
772 SplitString(filter, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
773 }
774 return tests;
775 }
776
777 // A test runner object to run tests across a number of sequence runners,
778 // and control running PRE_ tests in sequence.
779 class TestRunner {
780 public:
781 explicit TestRunner(TestLauncher* launcher,
782 size_t max_workers = 1u,
783 size_t batch_size = 1u)
784 : launcher_(launcher),
785 max_workers_(max_workers),
786 batch_size_(batch_size) {}
787
788 // Sets |test_names| to be run, with |batch_size| tests per process.
789 // Posts a job to run LaunchChildGTestProcess on |max_workers| workers.
790 void Run(const std::vector<std::string>& test_names);
791
792 private:
793 // Returns true if the next batch has to run on the same
794 // sequence task runner and use the same temporary directory.
795 static bool IsPreTestBatch(const std::vector<std::string>& test_names) {
796 return test_names.size() == 1u &&
797 test_names.front().find(kPreTestPrefix) != std::string::npos;
798 }
799
800 bool IsSingleThreaded() const { return batch_size_ == 0; }
801
802 void WorkerTask(scoped_refptr<TaskRunner> main_task_runner,
803 base::JobDelegate* delegate);
804
805 size_t GetMaxConcurrency(size_t worker_count) {
806 AutoLock auto_lock(lock_);
807 if (IsSingleThreaded()) {
808 return tests_to_run_.empty() ? 0 : 1;
809 }
810
811 // Round up the division to ensure enough workers for all tests.
812 return std::min((tests_to_run_.size() + batch_size_ - 1) / batch_size_,
813 max_workers_);
814 }
815
816 std::vector<std::string> GetNextBatch() EXCLUSIVE_LOCKS_REQUIRED(lock_) {
817 size_t batch_size;
818 // Single threaded case runs all tests in one batch.
819 if (IsSingleThreaded()) {
820 batch_size = tests_to_run_.size();
821 }
822 // Run remaining tests up to |batch_size_|.
823 else {
824 batch_size = std::min(batch_size_, tests_to_run_.size());
825 }
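// |tests_to_run_| is stored in reverse order (see Run()), so the next batch
// is taken from the back of the vector.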
826 std::vector<std::string> batch(tests_to_run_.rbegin(),
827 tests_to_run_.rbegin() + batch_size);
828 tests_to_run_.erase(tests_to_run_.end() - batch_size, tests_to_run_.end());
829 return batch;
830 }
831
832 // Cleans up |task_temp_dir| from a previous task and quits |run_loop| if
833 // |done|.
834 void CleanupTask(base::ScopedTempDir task_temp_dir, bool done);
835
836 ThreadChecker thread_checker_;
837
838 const raw_ptr<TestLauncher> launcher_;
839 JobHandle job_handle_;
840 // Max number of workers to use.
841 const size_t max_workers_;
842 // Number of tests per process; 0 is a special case that runs all tests in one batch.
843 const size_t batch_size_;
844 RunLoop run_loop_;
845 // Protects member used concurrently by worker tasks.
846 base::Lock lock_;
847 std::vector<std::string> tests_to_run_ GUARDED_BY(lock_);
848
849 base::WeakPtrFactory<TestRunner> weak_ptr_factory_{this};
850 };
851
852 void TestRunner::Run(const std::vector<std::string>& test_names) {
853 DCHECK(thread_checker_.CalledOnValidThread());
854 // No workers, fail immediately.
855 CHECK_GT(max_workers_, 0u);
856 if (test_names.empty()) {
857 return;
858 }
859
860 {
861 AutoLock auto_lock(lock_);
862 tests_to_run_ = test_names;
863 // Reverse test order to avoid copying the whole vector when removing tests.
864 std::reverse(tests_to_run_.begin(), tests_to_run_.end());
865 }
866
867 job_handle_ = base::PostJob(
868 FROM_HERE, {TaskPriority::USER_BLOCKING, MayBlock()},
869 BindRepeating(&TestRunner::WorkerTask, Unretained(this),
870 SingleThreadTaskRunner::GetCurrentDefault()),
871 BindRepeating(&TestRunner::GetMaxConcurrency, Unretained(this)));
872
873 run_loop_.Run();
874 }
875
876 void TestRunner::WorkerTask(scoped_refptr<TaskRunner> main_task_runner,
877 base::JobDelegate* delegate) {
878 bool done = false;
879 while (!done && !delegate->ShouldYield()) {
880 // Create a temporary directory for this task. This directory will hold the
881 // flags and results files for the child processes as well as their User
882 // Data dir, where appropriate. For platforms that support per-child temp
883 // dirs, this directory will also contain one subdirectory per child for
884 // that child's process-wide temp dir.
885 base::ScopedTempDir task_temp_dir;
886 CHECK(task_temp_dir.CreateUniqueTempDirUnderPath(GetTempDirForTesting()));
887 int child_index = 0;
888
889 std::vector<std::vector<std::string>> batches;
890 {
891 AutoLock auto_lock(lock_);
892 if (!tests_to_run_.empty()) {
893 batches.push_back(GetNextBatch());
894 while (IsPreTestBatch(batches.back())) {
895 DCHECK(!tests_to_run_.empty());
896 batches.push_back(GetNextBatch());
897 }
898 }
899 done = tests_to_run_.empty();
900 }
901 for (const auto& batch : batches) {
902 launcher_->LaunchChildGTestProcess(
903 main_task_runner, batch, task_temp_dir.GetPath(),
904 CreateChildTempDirIfSupported(task_temp_dir.GetPath(),
905 child_index++));
906 }
907
908 // Cleaning up test results is scheduled on |main_task_runner| because it
909 // must happen after all the post-processing steps that were scheduled in
910 // LaunchChildGTestProcess on |main_task_runner|.
911 main_task_runner->PostTask(
912 FROM_HERE,
913 BindOnce(&TestRunner::CleanupTask, weak_ptr_factory_.GetWeakPtr(),
914 std::move(task_temp_dir), done));
915 }
916 }
917
918 void TestRunner::CleanupTask(base::ScopedTempDir task_temp_dir, bool done) {
919 DCHECK(thread_checker_.CalledOnValidThread());
920
921 // Delete the previous task's temporary directory.
922 if (!task_temp_dir.Delete()) {
923 // This needs to be non-fatal at least for Windows.
924 LOG(WARNING) << "Failed to delete "
925 << task_temp_dir.GetPath().AsUTF8Unsafe();
926 }
927
928 if (!done) {
929 return;
930 }
931
932 if (job_handle_) {
933 job_handle_.Cancel();
934 run_loop_.QuitWhenIdle();
935 }
936 }
937
938 // Returns the number of files and directories in |dir|, or 0 if |dir| is empty.
939 int CountItemsInDirectory(const FilePath& dir) {
940 if (dir.empty())
941 return 0;
942 int items = 0;
943 FileEnumerator file_enumerator(
944 dir, /*recursive=*/false,
945 FileEnumerator::FILES | FileEnumerator::DIRECTORIES);
946 for (FilePath name = file_enumerator.Next(); !name.empty();
947 name = file_enumerator.Next()) {
948 ++items;
949 }
950 return items;
951 }
952
953 // Truncates a snippet in the middle to the given byte limit. byte_limit should
954 // be at least 30.
955 std::string TruncateSnippet(const base::StringPiece snippet,
956 size_t byte_limit) {
957 if (snippet.length() <= byte_limit) {
958 return std::string(snippet);
959 }
960 std::string truncation_message =
961 StringPrintf("\n<truncated (%zu bytes)>\n", snippet.length());
962 if (truncation_message.length() > byte_limit) {
963 // Fail gracefully.
964 return truncation_message;
965 }
966 size_t remaining_limit = byte_limit - truncation_message.length();
967 size_t first_half = remaining_limit / 2;
968 return base::StrCat(
969 {snippet.substr(0, first_half), truncation_message,
970 snippet.substr(snippet.length() - (remaining_limit - first_half))});
971 }
972
973 } // namespace
974
975 const char kGTestBreakOnFailure[] = "gtest_break_on_failure";
976 const char kGTestFilterFlag[] = "gtest_filter";
977 const char kGTestFlagfileFlag[] = "gtest_flagfile";
978 const char kGTestHelpFlag[] = "gtest_help";
979 const char kGTestListTestsFlag[] = "gtest_list_tests";
980 const char kGTestRepeatFlag[] = "gtest_repeat";
981 const char kGTestRunDisabledTestsFlag[] = "gtest_also_run_disabled_tests";
982 const char kGTestOutputFlag[] = "gtest_output";
983 const char kGTestShuffleFlag[] = "gtest_shuffle";
984 const char kGTestRandomSeedFlag[] = "gtest_random_seed";
985 const char kIsolatedScriptRunDisabledTestsFlag[] =
986 "isolated-script-test-also-run-disabled-tests";
987 const char kIsolatedScriptTestFilterFlag[] = "isolated-script-test-filter";
988 const char kIsolatedScriptTestRepeatFlag[] = "isolated-script-test-repeat";
989
990 class TestLauncher::TestInfo {
991 public:
992 TestInfo() = default;
993 TestInfo(const TestInfo& other) = default;
994 TestInfo(const TestIdentifier& test_id);
995 ~TestInfo() = default;
996
997 // Returns test name excluding the DISABLED_ prefix.
998 std::string GetDisabledStrippedName() const;
999
1000 // Returns full test name.
1001 std::string GetFullName() const;
1002
1003 // Returns test name with the PRE_ prefix added, excluding the DISABLED_ prefix.
1004 std::string GetPreName() const;
1005
1006 // Returns test name excluding DISABLED_ and PRE_ prefixes.
1007 std::string GetPrefixStrippedName() const;
1008
1009 const std::string& test_case_name() const { return test_case_name_; }
1010 const std::string& test_name() const { return test_name_; }
1011 const std::string& file() const { return file_; }
1012 int line() const { return line_; }
1013 bool disabled() const { return disabled_; }
1014 bool pre_test() const { return pre_test_; }
1015
1016 private:
1017 std::string test_case_name_;
1018 std::string test_name_;
1019 std::string file_;
1020 int line_;
1021 bool disabled_;
1022 bool pre_test_;
1023 };
1024
1025 TestLauncher::TestInfo::TestInfo(const TestIdentifier& test_id)
1026 : test_case_name_(test_id.test_case_name),
1027 test_name_(test_id.test_name),
1028 file_(test_id.file),
1029 line_(test_id.line),
1030 disabled_(false),
1031 pre_test_(false) {
1032 disabled_ = GetFullName().find(kDisabledTestPrefix) != std::string::npos;
1033 pre_test_ = test_name_.find(kPreTestPrefix) != std::string::npos;
1034 }
1035
1036 std::string TestLauncher::TestInfo::GetDisabledStrippedName() const {
1037 std::string test_name = GetFullName();
1038 ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
1039 std::string());
1040 return test_name;
1041 }
1042
1043 std::string TestLauncher::TestInfo::GetFullName() const {
1044 return FormatFullTestName(test_case_name_, test_name_);
1045 }
1046
1047 std::string TestLauncher::TestInfo::GetPreName() const {
1048 std::string name = test_name_;
1049 ReplaceSubstringsAfterOffset(&name, 0, kDisabledTestPrefix, std::string());
1050 std::string case_name = test_case_name_;
1051 ReplaceSubstringsAfterOffset(&case_name, 0, kDisabledTestPrefix,
1052 std::string());
1053 return FormatFullTestName(case_name, kPreTestPrefix + name);
1054 }
1055
1056 std::string TestLauncher::TestInfo::GetPrefixStrippedName() const {
1057 std::string test_name = GetDisabledStrippedName();
1058 ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
1059 return test_name;
1060 }
1061
1062 TestLauncherDelegate::~TestLauncherDelegate() = default;
1063
1064 bool TestLauncherDelegate::ShouldRunTest(const TestIdentifier& test) {
1065 return true;
1066 }
1067
1068 TestLauncher::LaunchOptions::LaunchOptions() = default;
1069 TestLauncher::LaunchOptions::LaunchOptions(const LaunchOptions& other) =
1070 default;
1071 TestLauncher::LaunchOptions::~LaunchOptions() = default;
1072
1073 TestLauncher::TestLauncher(TestLauncherDelegate* launcher_delegate,
1074 size_t parallel_jobs,
1075 size_t retry_limit)
1076 : launcher_delegate_(launcher_delegate),
1077 total_shards_(1),
1078 shard_index_(0),
1079 cycles_(1),
1080 broken_threshold_(0),
1081 test_started_count_(0),
1082 test_finished_count_(0),
1083 test_success_count_(0),
1084 test_broken_count_(0),
1085 retries_left_(0),
1086 retry_limit_(retry_limit),
1087 output_bytes_limit_(kOutputSnippetBytesLimit),
1088 force_run_broken_tests_(false),
1089 watchdog_timer_(FROM_HERE,
1090 kOutputTimeout,
1091 this,
1092 &TestLauncher::OnOutputTimeout),
1093 parallel_jobs_(parallel_jobs),
1094 print_test_stdio_(AUTO) {}
1095
1096 TestLauncher::~TestLauncher() {
1097 if (base::ThreadPoolInstance::Get()) {
1098 // Clear the ThreadPoolInstance entirely to make it clear to final cleanup
1099 // phases that they are happening in a single-threaded phase. Assertions in
1100 // code like ~ScopedFeatureList are unhappy otherwise (crbug.com/1359095).
1101 base::ThreadPoolInstance::Get()->Shutdown();
1102 base::ThreadPoolInstance::Get()->JoinForTesting();
1103 base::ThreadPoolInstance::Set(nullptr);
1104 }
1105 }
1106
1107 bool TestLauncher::Run(CommandLine* command_line) {
1108 base::PlatformThread::SetName("TestLauncherMain");
1109
1110 if (!Init((command_line == nullptr) ? CommandLine::ForCurrentProcess()
1111 : command_line))
1112 return false;
1113
1114 #if BUILDFLAG(IS_POSIX)
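// Route termination signals through the self-pipe so that shutdown handling
// runs on the main loop (via OnShutdownPipeReadable) instead of inside the
// signal handler.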
1115 CHECK_EQ(0, pipe(g_shutdown_pipe));
1116
1117 struct sigaction action;
1118 memset(&action, 0, sizeof(action));
1119 sigemptyset(&action.sa_mask);
1120 action.sa_handler = &ShutdownPipeSignalHandler;
1121
1122 CHECK_EQ(0, sigaction(SIGINT, &action, nullptr));
1123 CHECK_EQ(0, sigaction(SIGQUIT, &action, nullptr));
1124 CHECK_EQ(0, sigaction(SIGTERM, &action, nullptr));
1125
1126 auto controller = base::FileDescriptorWatcher::WatchReadable(
1127 g_shutdown_pipe[0],
1128 base::BindRepeating(&TestLauncher::OnShutdownPipeReadable,
1129 Unretained(this)));
1130 #endif // BUILDFLAG(IS_POSIX)
1131
1132 // Start the watchdog timer.
1133 watchdog_timer_.Reset();
1134
1135 // Indicate a test did not succeed.
1136 bool test_failed = false;
1137 int iterations = cycles_;
1138 if (cycles_ > 1 && !stop_on_failure_) {
1139 // If we don't stop on failure, execute all the repeats in a single iteration,
1140 // which allows us to parallelize the execution.
1141 iterations = 1;
1142 repeats_per_iteration_ = cycles_;
1143 }
1144 // Set to false if any iteration fails.
1145 bool run_result = true;
1146
1147 while ((iterations > 0 || iterations == -1) &&
1148 !(stop_on_failure_ && test_failed)) {
1149 OnTestIterationStart();
1150
1151 RunTests();
1152 bool retry_result = RunRetryTests();
1153 // Signal failure, but continue to run all requested test iterations.
1154 // With the summary of all iterations at the end this is a good default.
1155 run_result = run_result && retry_result;
1156
1157 if (retry_result) {
1158 fprintf(stdout, "SUCCESS: all tests passed.\n");
1159 fflush(stdout);
1160 }
1161
1162 test_failed = test_success_count_ != test_finished_count_;
1163 OnTestIterationFinished();
1164 // Special value "-1" means "repeat indefinitely".
1165 iterations = (iterations == -1) ? iterations : iterations - 1;
1166 }
1167
1168 if (cycles_ != 1)
1169 results_tracker_.PrintSummaryOfAllIterations();
1170
1171 MaybeSaveSummaryAsJSON(std::vector<std::string>());
1172
1173 return run_result;
1174 }
1175
1176 void TestLauncher::LaunchChildGTestProcess(
1177 scoped_refptr<TaskRunner> task_runner,
1178 const std::vector<std::string>& test_names,
1179 const FilePath& task_temp_dir,
1180 const FilePath& child_temp_dir) {
1181 FilePath result_file;
1182 CommandLine cmd_line = launcher_delegate_->GetCommandLine(
1183 test_names, task_temp_dir, &result_file);
1184
1185 // Record the exact command line used to launch the child.
1186 CommandLine new_command_line(PrepareCommandLineForGTest(
1187 cmd_line, launcher_delegate_->GetWrapper(), retries_left_));
1188 LaunchOptions options;
1189 options.flags = launcher_delegate_->GetLaunchOptions();
1190
1191 if (BotModeEnabled(CommandLine::ForCurrentProcess())) {
1192 LOG(INFO) << "Starting [" << base::JoinString(test_names, ", ") << "]";
1193 }
1194
1195 ChildProcessResults process_results = DoLaunchChildTestProcess(
1196 new_command_line, child_temp_dir, result_file,
1197 launcher_delegate_->GetTimeout(), test_names.size(), options,
1198 redirect_stdio_, launcher_delegate_);
1199
1200 // Invoke ProcessTestResults on the original thread, not
1201 // on a worker pool thread.
1202 task_runner->PostTask(
1203 FROM_HERE,
1204 BindOnce(&TestLauncher::ProcessTestResults, Unretained(this), test_names,
1205 result_file, process_results.output_file_contents,
1206 process_results.elapsed_time, process_results.exit_code,
1207 process_results.was_timeout, process_results.thread_id,
1208 process_results.process_num,
1209 CountItemsInDirectory(child_temp_dir)));
1210 }
1211
1212 // Determines which result status will be assigned for missing test results.
1213 TestResult::Status MissingResultStatus(size_t tests_to_run_count,
1214 bool was_timeout,
1215 bool exit_code) {
1216 // There is more than one test, cannot assess status.
1217 if (tests_to_run_count > 1u)
1218 return TestResult::TEST_SKIPPED;
1219
1220 // There is only one test and no results.
1221 // Try to determine status by timeout or exit code.
1222 if (was_timeout)
1223 return TestResult::TEST_TIMEOUT;
1224 if (exit_code != 0)
1225 return TestResult::TEST_FAILURE;
1226
1227 // It's a strange case when the test executed successfully,
1228 // but we failed to read the machine-readable report for it.
1229 return TestResult::TEST_UNKNOWN;
1230 }
1231
1232 // Returns interpreted test results.
1233 void TestLauncher::ProcessTestResults(
1234 const std::vector<std::string>& test_names,
1235 const FilePath& result_file,
1236 const std::string& output,
1237 TimeDelta elapsed_time,
1238 int exit_code,
1239 bool was_timeout,
1240 PlatformThreadId thread_id,
1241 int process_num,
1242 int leaked_items) {
1243 std::vector<TestResult> test_results;
1244 bool crashed = false;
1245 bool have_test_results =
1246 ProcessGTestOutput(result_file, &test_results, &crashed);
1247
1248 if (!have_test_results) {
1249 // We do not have reliable details about test results (parsing test
1250 // stdout is known to be unreliable).
1251 LOG(ERROR) << "Failed to get out-of-band test success data, "
1252 "dumping full stdio below:\n"
1253 << output << "\n";
1254 // This is odd, but sometimes ProcessGTestOutput returns
1255 // false even though test_results is not empty.
1256 test_results.clear();
1257 }
1258
1259 TestResult::Status missing_result_status =
1260 MissingResultStatus(test_names.size(), was_timeout, exit_code);
1261
1262 // TODO(phajdan.jr): Check for duplicates and mismatches between
1263 // the results we got from XML file and tests we intended to run.
1264 std::map<std::string, TestResult> results_map;
1265 for (const auto& i : test_results)
1266 results_map[i.full_name] = i;
1267
1268 // Results to be reported back to the test launcher.
1269 std::vector<TestResult> final_results;
1270
1271 for (const auto& i : test_names) {
1272 if (Contains(results_map, i)) {
1273 TestResult test_result = results_map[i];
1274 // Fix up the test status: we forcibly kill the child process
1275 // after the timeout, so from XML results it looks just like
1276 // a crash.
1277 if ((was_timeout && test_result.status == TestResult::TEST_CRASH) ||
1278 // When we run multiple tests in a batch with a timeout applied
1279 // to the entire batch, it is possible that with other tests
1280 // running quickly some tests take longer than the per-test timeout.
1281 // For consistent handling of tests independent of order and other
1282 // factors, mark them as timing out.
1283 test_result.elapsed_time > launcher_delegate_->GetTimeout()) {
1284 test_result.status = TestResult::TEST_TIMEOUT;
1285 }
1286 final_results.push_back(test_result);
1287 } else {
1288 // TODO(phajdan.jr): Explicitly pass the info that the test didn't
1289 // run for a mysterious reason.
1290 LOG(ERROR) << "no test result for " << i;
1291 TestResult test_result;
1292 test_result.full_name = i;
1293 test_result.status = missing_result_status;
1294 final_results.push_back(test_result);
1295 }
1296 }
1297 // TODO(phajdan.jr): Handle the case where processing XML output
1298 // indicates a crash but none of the test results is marked as crashing.
1299
1300 bool has_non_success_test = false;
1301 for (const auto& i : final_results) {
1302 if (i.status != TestResult::TEST_SUCCESS) {
1303 has_non_success_test = true;
1304 break;
1305 }
1306 }
1307
1308 if (!has_non_success_test && exit_code != 0) {
1309 // This is a somewhat surprising case: all tests are marked as successful,
1310 // but the exit code was not zero. This can happen e.g. under memory
1311 // tools that report leaks this way. Mark all tests as a failure on exit,
1312 // and for more precise info they'd need to be retried serially.
1313 for (auto& i : final_results)
1314 i.status = TestResult::TEST_FAILURE_ON_EXIT;
1315 }
1316
1317 for (auto& i : final_results) {
1318 // Fix the output snippet after possible changes to the test result.
1319 i.output_snippet = GetTestOutputSnippet(i, output);
1320 // The thread id injected here is that of the worker thread that launched the
1321 // child test process; it might be different from the current thread running
1322 // ProcessTestResults.
1323 i.thread_id = thread_id;
1324 i.process_num = process_num;
1325 }
1326
1327 if (leaked_items)
1328 results_tracker_.AddLeakedItems(leaked_items, test_names);
1329
1330 launcher_delegate_->ProcessTestResults(final_results, elapsed_time);
1331
1332 for (const auto& result : final_results)
1333 OnTestFinished(result);
1334 }
1335
1336 void TestLauncher::OnTestFinished(const TestResult& original_result) {
1337 ++test_finished_count_;
1338
1339 TestResult result(original_result);
1340
1341 if (result.output_snippet.length() > output_bytes_limit_) {
1342 if (result.status == TestResult::TEST_SUCCESS)
1343 result.status = TestResult::TEST_EXCESSIVE_OUTPUT;
1344
1345 result.output_snippet =
1346 TruncateSnippetFocused(result.output_snippet, output_bytes_limit_);
1347 }
1348
1349 bool print_snippet = false;
1350 if (print_test_stdio_ == AUTO) {
1351 print_snippet = (result.status != TestResult::TEST_SUCCESS);
1352 } else if (print_test_stdio_ == ALWAYS) {
1353 print_snippet = true;
1354 } else if (print_test_stdio_ == NEVER) {
1355 print_snippet = false;
1356 }
1357 if (print_snippet) {
1358 std::vector<base::StringPiece> snippet_lines =
1359 SplitStringPiece(result.output_snippet, "\n", base::KEEP_WHITESPACE,
1360 base::SPLIT_WANT_ALL);
1361 if (snippet_lines.size() > kOutputSnippetLinesLimit) {
1362 size_t truncated_size = snippet_lines.size() - kOutputSnippetLinesLimit;
1363 snippet_lines.erase(
1364 snippet_lines.begin(),
1365 snippet_lines.begin() + truncated_size);
1366 snippet_lines.insert(snippet_lines.begin(), "<truncated>");
1367 }
1368 fprintf(stdout, "%s", base::JoinString(snippet_lines, "\n").c_str());
1369 fflush(stdout);
1370 }
1371
1372 if (result.status == TestResult::TEST_SUCCESS) {
1373 ++test_success_count_;
1374 } else {
1375 // Record the prefix-stripped name so that all dependent tests get retried.
1376 std::string test_name(result.full_name);
1377 ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
1378 ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
1379 std::string());
1380 tests_to_retry_.insert(test_name);
1381 }
1382
1383 // There are no results for this test,
1384 // most likely due to another test failing in the same batch.
1385 if (result.status != TestResult::TEST_SKIPPED)
1386 results_tracker_.AddTestResult(result);
1387
1388 // TODO(phajdan.jr): Align counter (padding).
1389 std::string status_line(StringPrintf("[%zu/%zu] %s ", test_finished_count_,
1390 test_started_count_,
1391 result.full_name.c_str()));
1392 if (result.completed()) {
1393 status_line.append(StringPrintf("(%" PRId64 " ms)",
1394 result.elapsed_time.InMilliseconds()));
1395 } else if (result.status == TestResult::TEST_TIMEOUT) {
1396 status_line.append("(TIMED OUT)");
1397 } else if (result.status == TestResult::TEST_CRASH) {
1398 status_line.append("(CRASHED)");
1399 } else if (result.status == TestResult::TEST_SKIPPED) {
1400 status_line.append("(SKIPPED)");
1401 } else if (result.status == TestResult::TEST_UNKNOWN) {
1402 status_line.append("(UNKNOWN)");
1403 } else {
1404 // Fail very loudly so it's not ignored.
1405 CHECK(false) << "Unhandled test result status: " << result.status;
1406 }
1407 fprintf(stdout, "%s\n", status_line.c_str());
1408 fflush(stdout);
1409
1410 if (CommandLine::ForCurrentProcess()->HasSwitch(
1411 switches::kTestLauncherPrintTimestamps)) {
1412 ::logging::ScopedLoggingSettings scoped_logging_setting;
1413 ::logging::SetLogItems(true, true, true, true);
1414 LOG(INFO) << "Test_finished_timestamp";
1415 }
1416 // We just printed a status line, reset the watchdog timer.
1417 watchdog_timer_.Reset();
1418
1419 // Do not waste time on timeouts.
1420 if (result.status == TestResult::TEST_TIMEOUT) {
1421 test_broken_count_++;
1422 }
1423 if (!force_run_broken_tests_ && test_broken_count_ >= broken_threshold_) {
1424 fprintf(stdout, "Too many badly broken tests (%zu), exiting now.\n",
1425 test_broken_count_);
1426 fflush(stdout);
1427
1428 #if BUILDFLAG(IS_POSIX)
1429 KillSpawnedTestProcesses();
1430 #endif // BUILDFLAG(IS_POSIX)
1431
1432 MaybeSaveSummaryAsJSON({"BROKEN_TEST_EARLY_EXIT"});
1433
1434 exit(1);
1435 }
1436 }
1437
1438 // Helper used to parse test filter files. Syntax is documented in
1439 // //testing/buildbot/filters/README.md .
1440 bool LoadFilterFile(const FilePath& file_path,
1441 std::vector<std::string>* positive_filter,
1442 std::vector<std::string>* negative_filter) {
1443 std::string file_content;
1444 if (!ReadFileToString(file_path, &file_content)) {
1445 LOG(ERROR) << "Failed to read the filter file.";
1446 return false;
1447 }
1448
1449 std::vector<std::string> filter_lines = SplitString(
1450 file_content, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
1451 int line_num = 0;
1452 for (const std::string& filter_line : filter_lines) {
1453 line_num++;
1454
1455 size_t hash_pos = filter_line.find('#');
1456
1457 // When the # symbol is not at the beginning of the line and is not
1458 // preceded by a space, it's likely that the comment was
1459 // unintentional.
1460 if (hash_pos != std::string::npos && hash_pos > 0 &&
1461 filter_line[hash_pos - 1] != ' ') {
1462 LOG(WARNING) << "Content of line " << line_num << " in " << file_path
1463 << " after # is treated as a comment, " << filter_line;
1464 }
1465
1466 // Strip comments and whitespace from each line.
1467 std::string trimmed_line(
1468 TrimWhitespaceASCII(filter_line.substr(0, hash_pos), TRIM_ALL));
1469
1470 if (trimmed_line.substr(0, 2) == "//") {
1471 LOG(ERROR) << "Line " << line_num << " in " << file_path
1472 << " starts with //, use # for comments.";
1473 return false;
1474 }
1475
1476     // Skip empty lines (including lines that contained only a comment).
1477 if (trimmed_line.empty())
1478 continue;
1479
1480 if (trimmed_line[0] == '-')
1481 negative_filter->push_back(trimmed_line.substr(1));
1482 else
1483 positive_filter->push_back(trimmed_line);
1484 }
1485
1486 return true;
1487 }
1488
1489 bool TestLauncher::IsOnlyExactPositiveFilterFromFile(
1490 const CommandLine* command_line) const {
1491 if (command_line->HasSwitch(kGTestFilterFlag)) {
1492     LOG(ERROR) << "Found " << kGTestFilterFlag << ".";
1493 return false;
1494 }
1495 if (!negative_test_filter_.empty()) {
1496 LOG(ERROR) << "Found negative filters in the filter file.";
1497 return false;
1498 }
1499 for (const auto& filter : positive_test_filter_) {
1500 if (Contains(filter, '*')) {
1501 LOG(ERROR) << "Found wildcard positive filters in the filter file.";
1502 return false;
1503 }
1504 }
1505 return true;
1506 }
1507
1508 bool TestLauncher::Init(CommandLine* command_line) {
1509 // Initialize sharding. Command line takes precedence over legacy environment
1510 // variables.
1511 if (command_line->HasSwitch(switches::kTestLauncherTotalShards) &&
1512 command_line->HasSwitch(switches::kTestLauncherShardIndex)) {
1513 if (!StringToInt(
1514 command_line->GetSwitchValueASCII(
1515 switches::kTestLauncherTotalShards),
1516 &total_shards_)) {
1517 LOG(ERROR) << "Invalid value for " << switches::kTestLauncherTotalShards;
1518 return false;
1519 }
1520 if (!StringToInt(
1521 command_line->GetSwitchValueASCII(
1522 switches::kTestLauncherShardIndex),
1523 &shard_index_)) {
1524 LOG(ERROR) << "Invalid value for " << switches::kTestLauncherShardIndex;
1525 return false;
1526 }
1527 fprintf(stdout,
1528 "Using sharding settings from command line. This is shard %d/%d\n",
1529 shard_index_, total_shards_);
1530 fflush(stdout);
1531 } else {
1532 if (!TakeInt32FromEnvironment(kTestTotalShards, &total_shards_))
1533 return false;
1534 if (!TakeInt32FromEnvironment(kTestShardIndex, &shard_index_))
1535 return false;
1536 fprintf(stdout,
1537 "Using sharding settings from environment. This is shard %d/%d\n",
1538 shard_index_, total_shards_);
1539 fflush(stdout);
1540 }
1541 if (shard_index_ < 0 ||
1542 total_shards_ < 0 ||
1543 shard_index_ >= total_shards_) {
1544 LOG(ERROR) << "Invalid sharding settings: we require 0 <= "
1545 << kTestShardIndex << " < " << kTestTotalShards
1546 << ", but you have " << kTestShardIndex << "=" << shard_index_
1547 << ", " << kTestTotalShards << "=" << total_shards_ << ".\n";
1548 return false;
1549 }
1550
1551 // Make sure we don't pass any sharding-related environment to the child
1552 // processes. This test launcher implements the sharding completely.
1553 CHECK(UnsetEnvironmentVariableIfExists("GTEST_TOTAL_SHARDS"));
1554 CHECK(UnsetEnvironmentVariableIfExists("GTEST_SHARD_INDEX"));
1555
1556 if (command_line->HasSwitch(kGTestRepeatFlag) &&
1557 !StringToInt(command_line->GetSwitchValueASCII(kGTestRepeatFlag),
1558 &cycles_)) {
1559 LOG(ERROR) << "Invalid value for " << kGTestRepeatFlag;
1560 return false;
1561 }
1562 if (command_line->HasSwitch(kIsolatedScriptTestRepeatFlag) &&
1563 !StringToInt(
1564 command_line->GetSwitchValueASCII(kIsolatedScriptTestRepeatFlag),
1565 &cycles_)) {
1566 LOG(ERROR) << "Invalid value for " << kIsolatedScriptTestRepeatFlag;
1567 return false;
1568 }
1569
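  // Retry limit precedence: an explicit switches::kTestLauncherRetryLimit (or
  // its isolated-script equivalent) wins when present; otherwise repeating
  // tests, breaking on failure, or running a locally filtered subset outside
  // bot mode disables retries.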
1570 if (command_line->HasSwitch(switches::kTestLauncherRetryLimit)) {
1571 int retry_limit = -1;
1572 if (!StringToInt(command_line->GetSwitchValueASCII(
1573 switches::kTestLauncherRetryLimit), &retry_limit) ||
1574 retry_limit < 0) {
1575 LOG(ERROR) << "Invalid value for " << switches::kTestLauncherRetryLimit;
1576 return false;
1577 }
1578
1579 retry_limit_ = retry_limit;
1580 } else if (command_line->HasSwitch(
1581 switches::kIsolatedScriptTestLauncherRetryLimit)) {
1582 int retry_limit = -1;
1583 if (!StringToInt(command_line->GetSwitchValueASCII(
1584 switches::kIsolatedScriptTestLauncherRetryLimit),
1585 &retry_limit) ||
1586 retry_limit < 0) {
1587 LOG(ERROR) << "Invalid value for "
1588 << switches::kIsolatedScriptTestLauncherRetryLimit;
1589 return false;
1590 }
1591
1592 retry_limit_ = retry_limit;
1593 } else if (command_line->HasSwitch(kGTestRepeatFlag) ||
1594 command_line->HasSwitch(kGTestBreakOnFailure)) {
1595 // If we are repeating tests or waiting for the first test to fail, disable
1596 // retries.
1597 retry_limit_ = 0U;
1598 } else if (!BotModeEnabled(command_line) &&
1599 (command_line->HasSwitch(kGTestFilterFlag) ||
1600 command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
1601     // No retry flag specified, not in bot mode, and running a filtered
1602     // subset: set retries to zero.
1603 retry_limit_ = 0U;
1604 }
1605
1606 retries_left_ = retry_limit_;
1607 force_run_broken_tests_ =
1608 command_line->HasSwitch(switches::kTestLauncherForceRunBrokenTests);
1609
1610 if (command_line->HasSwitch(switches::kTestLauncherOutputBytesLimit)) {
1611 int output_bytes_limit = -1;
1612 if (!StringToInt(command_line->GetSwitchValueASCII(
1613 switches::kTestLauncherOutputBytesLimit),
1614 &output_bytes_limit) ||
1615 output_bytes_limit < 0) {
1616 LOG(ERROR) << "Invalid value for "
1617 << switches::kTestLauncherOutputBytesLimit;
1618 return false;
1619 }
1620
1621 output_bytes_limit_ = output_bytes_limit;
1622 }
1623
1624 fprintf(stdout, "Using %zu parallel jobs.\n", parallel_jobs_);
1625 fflush(stdout);
1626
1627 CreateAndStartThreadPool(parallel_jobs_);
1628
1629 std::vector<std::string> positive_file_filter;
1630 std::vector<std::string> positive_gtest_filter;
1631
1632 if (command_line->HasSwitch(switches::kTestLauncherFilterFile)) {
1633 auto filter =
1634 command_line->GetSwitchValueNative(switches::kTestLauncherFilterFile);
1635 for (auto filter_file :
1636 SplitStringPiece(filter, FILE_PATH_LITERAL(";"), base::TRIM_WHITESPACE,
1637 base::SPLIT_WANT_ALL)) {
1638 #if BUILDFLAG(IS_IOS)
1639 // On iOS, the filter files are bundled with the test application.
1640 base::FilePath data_dir;
1641 PathService::Get(DIR_SRC_TEST_DATA_ROOT, &data_dir);
1642 base::FilePath filter_file_path = data_dir.Append(FilePath(filter_file));
1643 #else
1644 base::FilePath filter_file_path =
1645 base::MakeAbsoluteFilePath(FilePath(filter_file));
1646 #endif // BUILDFLAG(IS_IOS)
1647
1648 if (!LoadFilterFile(filter_file_path, &positive_file_filter,
1649 &negative_test_filter_))
1650 return false;
1651 }
1652 }
1653
1654   // If kGTestRunDisabledTestsFlag is set, drop the negative filters so that
1655   // tests excluded via testing/buildbot/filters are forced to run.
1656 if (command_line->HasSwitch(kGTestRunDisabledTestsFlag)) {
1657 negative_test_filter_.clear();
1658 }
1659
1660 // If `kEnforceExactPositiveFilter` is set, only accept exact positive
1661 // filters from the filter file.
1662 enforce_exact_postive_filter_ =
1663 command_line->HasSwitch(switches::kEnforceExactPositiveFilter);
1664 if (enforce_exact_postive_filter_ &&
1665 !IsOnlyExactPositiveFilterFromFile(command_line)) {
1666 LOG(ERROR) << "With " << switches::kEnforceExactPositiveFilter
1667 << ", only accept exact positive filters via "
1668 << switches::kTestLauncherFilterFile;
1669 return false;
1670 }
1671
1672 // Split --gtest_filter at '-', if there is one, to separate into
1673 // positive filter and negative filter portions.
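  // --isolated-script-test-filter is expected to separate patterns with '::'
  // (handled by ExtractTestsFromFilter below), a separator --gtest_filter does
  // not use, so '::' support is only enabled when the gtest flag is absent.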
1674 bool double_colon_supported = !command_line->HasSwitch(kGTestFilterFlag);
1675 std::string filter = command_line->GetSwitchValueASCII(
1676 double_colon_supported ? kIsolatedScriptTestFilterFlag
1677 : kGTestFilterFlag);
1678 size_t dash_pos = filter.find('-');
1679 if (dash_pos == std::string::npos) {
1680 positive_gtest_filter =
1681 ExtractTestsFromFilter(filter, double_colon_supported);
1682 } else {
1683 // Everything up to the dash.
1684 positive_gtest_filter = ExtractTestsFromFilter(filter.substr(0, dash_pos),
1685 double_colon_supported);
1686
1687 // Everything after the dash.
1688 for (std::string pattern : ExtractTestsFromFilter(
1689 filter.substr(dash_pos + 1), double_colon_supported)) {
1690 negative_test_filter_.push_back(pattern);
1691 }
1692 }
1693
1694 skip_disabled_tests_ =
1695 !command_line->HasSwitch(kGTestRunDisabledTestsFlag) &&
1696 !command_line->HasSwitch(kIsolatedScriptRunDisabledTestsFlag);
1697
1698 if (!InitTests())
1699 return false;
1700
1701 if (!ShuffleTests(command_line))
1702 return false;
1703
1704 if (!ProcessAndValidateTests())
1705 return false;
1706
1707 if (command_line->HasSwitch(switches::kTestLauncherPrintTestStdio)) {
1708 std::string print_test_stdio = command_line->GetSwitchValueASCII(
1709 switches::kTestLauncherPrintTestStdio);
1710 if (print_test_stdio == "auto") {
1711 print_test_stdio_ = AUTO;
1712 } else if (print_test_stdio == "always") {
1713 print_test_stdio_ = ALWAYS;
1714 } else if (print_test_stdio == "never") {
1715 print_test_stdio_ = NEVER;
1716 } else {
1717 LOG(WARNING) << "Invalid value of "
1718 << switches::kTestLauncherPrintTestStdio << ": "
1719 << print_test_stdio;
1720 return false;
1721 }
1722 }
1723
1724 stop_on_failure_ = command_line->HasSwitch(kGTestBreakOnFailure);
1725
1726 if (command_line->HasSwitch(switches::kTestLauncherSummaryOutput)) {
1727 summary_path_ = FilePath(
1728 command_line->GetSwitchValuePath(switches::kTestLauncherSummaryOutput));
1729 }
1730 if (command_line->HasSwitch(switches::kTestLauncherTrace)) {
1731 trace_path_ = FilePath(
1732 command_line->GetSwitchValuePath(switches::kTestLauncherTrace));
1733 }
1734
1735 // When running in parallel mode we need to redirect stdio to avoid mixed-up
1736 // output. We also always redirect on the bots to get the test output into
1737 // JSON summary.
1738 redirect_stdio_ = (parallel_jobs_ > 1) || BotModeEnabled(command_line);
1739
1740 CombinePositiveTestFilters(std::move(positive_gtest_filter),
1741 std::move(positive_file_filter));
1742
1743 if (!results_tracker_.Init(*command_line)) {
1744 LOG(ERROR) << "Failed to initialize test results tracker.";
1745     return false;
1746 }
1747
1748 #if defined(NDEBUG)
1749 results_tracker_.AddGlobalTag("MODE_RELEASE");
1750 #else
1751 results_tracker_.AddGlobalTag("MODE_DEBUG");
1752 #endif
1753
1754 // Operating systems (sorted alphabetically).
1755 // Note that they can deliberately overlap, e.g. OS_LINUX is a subset
1756 // of OS_POSIX.
1757 #if BUILDFLAG(IS_ANDROID)
1758 results_tracker_.AddGlobalTag("OS_ANDROID");
1759 #endif
1760
1761 #if BUILDFLAG(IS_APPLE)
1762 results_tracker_.AddGlobalTag("OS_APPLE");
1763 #endif
1764
1765 #if BUILDFLAG(IS_BSD)
1766 results_tracker_.AddGlobalTag("OS_BSD");
1767 #endif
1768
1769 #if BUILDFLAG(IS_FREEBSD)
1770 results_tracker_.AddGlobalTag("OS_FREEBSD");
1771 #endif
1772
1773 #if BUILDFLAG(IS_FUCHSIA)
1774 results_tracker_.AddGlobalTag("OS_FUCHSIA");
1775 #endif
1776
1777 #if BUILDFLAG(IS_IOS)
1778 results_tracker_.AddGlobalTag("OS_IOS");
1779 #endif
1780
1781 #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
1782 results_tracker_.AddGlobalTag("OS_LINUX");
1783 #endif
1784
1785 #if BUILDFLAG(IS_CHROMEOS_ASH)
1786 results_tracker_.AddGlobalTag("OS_CHROMEOS");
1787 #endif
1788
1789 #if BUILDFLAG(IS_MAC)
1790 results_tracker_.AddGlobalTag("OS_MAC");
1791 #endif
1792
1793 #if BUILDFLAG(IS_NACL)
1794 results_tracker_.AddGlobalTag("OS_NACL");
1795 #endif
1796
1797 #if BUILDFLAG(IS_OPENBSD)
1798 results_tracker_.AddGlobalTag("OS_OPENBSD");
1799 #endif
1800
1801 #if BUILDFLAG(IS_POSIX)
1802 results_tracker_.AddGlobalTag("OS_POSIX");
1803 #endif
1804
1805 #if BUILDFLAG(IS_SOLARIS)
1806 results_tracker_.AddGlobalTag("OS_SOLARIS");
1807 #endif
1808
1809 #if BUILDFLAG(IS_WIN)
1810 results_tracker_.AddGlobalTag("OS_WIN");
1811 #endif
1812
1813 // CPU-related tags.
1814 #if defined(ARCH_CPU_32_BITS)
1815 results_tracker_.AddGlobalTag("CPU_32_BITS");
1816 #endif
1817
1818 #if defined(ARCH_CPU_64_BITS)
1819 results_tracker_.AddGlobalTag("CPU_64_BITS");
1820 #endif
1821
1822 return true;
1823 }
1824
1825 bool TestLauncher::InitTests() {
1826 std::vector<TestIdentifier> tests;
1827 if (!launcher_delegate_->GetTests(&tests)) {
1828 LOG(ERROR) << "Failed to get list of tests.";
1829 return false;
1830 }
1831
1832 // Check for duplicate test names. These can cause difficult-to-diagnose
1833 // crashes in the test runner as well as confusion about exactly what test is
1834 // failing. See https://crbug.com/1463355 for details.
1835 std::unordered_set<std::string> full_test_names;
1836 bool dups_found = false;
1837 for (auto& test : tests) {
1838 const std::string full_test_name =
1839 test.test_case_name + "." + test.test_name;
1840 auto [it, inserted] = full_test_names.insert(full_test_name);
1841 if (!inserted) {
1842 LOG(WARNING) << "Duplicate test name found: " << full_test_name;
1843 dups_found = true;
1844 }
1845 }
1846 CHECK(!dups_found);
1847
1848 std::vector<std::string> uninstantiated_tests;
1849 for (const TestIdentifier& test_id : tests) {
1850 TestInfo test_info(test_id);
1851 if (test_id.test_case_name == "GoogleTestVerification") {
1852 // GoogleTestVerification is used by googletest to detect tests that are
1853 // parameterized but not instantiated.
1854 uninstantiated_tests.push_back(test_id.test_name);
1855 continue;
1856 }
1857     // TODO(isamsonov): crbug.com/1004417 remove when windows builders
1858     // stop flaking on MANUAL_ tests.
1859 if (launcher_delegate_->ShouldRunTest(test_id))
1860 tests_.push_back(test_info);
1861 }
1862 if (!uninstantiated_tests.empty()) {
1863 LOG(ERROR) << "Found uninstantiated parameterized tests. These test suites "
1864 "will not run:";
1865 for (const std::string& name : uninstantiated_tests)
1866 LOG(ERROR) << " " << name;
1867 LOG(ERROR) << "Please use INSTANTIATE_TEST_SUITE_P to instantiate the "
1868 "tests, or GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST if "
1869 "the parameter list can be intentionally empty. See "
1870 "//third_party/googletest/src/docs/advanced.md";
1871 return false;
1872 }
1873 return true;
1874 }
1875
1876 bool TestLauncher::ShuffleTests(CommandLine* command_line) {
1877 if (command_line->HasSwitch(kGTestShuffleFlag)) {
1878 uint32_t shuffle_seed;
1879 if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
1880 const std::string custom_seed_str =
1881 command_line->GetSwitchValueASCII(kGTestRandomSeedFlag);
1882 uint32_t custom_seed = 0;
1883 if (!StringToUint(custom_seed_str, &custom_seed)) {
1884 LOG(ERROR) << "Unable to parse seed \"" << custom_seed_str << "\".";
1885 return false;
1886 }
1887 if (custom_seed >= kRandomSeedUpperBound) {
1888 LOG(ERROR) << "Seed " << custom_seed << " outside of expected range "
1889 << "[0, " << kRandomSeedUpperBound << ")";
1890 return false;
1891 }
1892 shuffle_seed = custom_seed;
1893 } else {
1894       std::uniform_int_distribution<uint32_t> dist(0, kRandomSeedUpperBound - 1);
1895 std::random_device random_dev;
1896 shuffle_seed = dist(random_dev);
1897 }
1898
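    // Seed a Mersenne Twister so the shuffle is deterministic for a given
    // seed; the seed printed below can be passed back via kGTestRandomSeedFlag
    // to reproduce a particular order.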
1899 std::mt19937 randomizer;
1900 randomizer.seed(shuffle_seed);
1901 ranges::shuffle(tests_, randomizer);
1902
1903 fprintf(stdout, "Randomizing with seed %u\n", shuffle_seed);
1904 fflush(stdout);
1905 } else if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
1906 LOG(ERROR) << kGTestRandomSeedFlag << " requires " << kGTestShuffleFlag;
1907 return false;
1908 }
1909 return true;
1910 }
1911
1912 bool TestLauncher::ProcessAndValidateTests() {
1913 bool result = true;
1914 std::unordered_set<std::string> disabled_tests;
1915 std::unordered_map<std::string, TestInfo> pre_tests;
1916
1917   // Find disabled and PRE_ tests.
1918 for (const TestInfo& test_info : tests_) {
1919 std::string test_name = test_info.GetFullName();
1920 results_tracker_.AddTest(test_name);
1921 if (test_info.disabled()) {
1922 disabled_tests.insert(test_info.GetDisabledStrippedName());
1923 results_tracker_.AddDisabledTest(test_name);
1924 }
1925 if (test_info.pre_test())
1926 pre_tests[test_info.GetDisabledStrippedName()] = test_info;
1927 }
1928
1929 std::vector<TestInfo> tests_to_run;
1930 for (const TestInfo& test_info : tests_) {
1931 std::string test_name = test_info.GetFullName();
1932 // If any test has a matching disabled test, fail and log for audit.
1933 if (base::Contains(disabled_tests, test_name)) {
1934 LOG(ERROR) << test_name << " duplicated by a DISABLED_ test";
1935 result = false;
1936 }
1937
1938     // Skip PRE_ tests here; they are appended when their final test is found.
1939 if (test_info.pre_test())
1940 continue;
1941
1942 std::vector<TestInfo> test_sequence;
1943 test_sequence.push_back(test_info);
1944     // Collect the PRE_ tests so they can be placed before the final test, in order.
1945 while (base::Contains(pre_tests, test_sequence.back().GetPreName())) {
1946 test_sequence.push_back(pre_tests[test_sequence.back().GetPreName()]);
1947 pre_tests.erase(test_sequence.back().GetDisabledStrippedName());
1948 }
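    // |test_sequence| was built from the final test backwards (XYZ, PRE_XYZ,
    // PRE_PRE_XYZ, ...), so it is inserted in reverse below to make the PRE_
    // tests run first.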
1949 // Skip disabled tests unless explicitly requested.
1950 if (!test_info.disabled() || !skip_disabled_tests_)
1951 tests_to_run.insert(tests_to_run.end(), test_sequence.rbegin(),
1952 test_sequence.rend());
1953 }
1954 tests_ = std::move(tests_to_run);
1955
1956 // If any tests remain in |pre_tests| map, fail and log for audit.
1957 for (const auto& i : pre_tests) {
1958 LOG(ERROR) << i.first << " is an orphaned pre test";
1959 result = false;
1960 }
1961 return result;
1962 }
1963
1964 void TestLauncher::CreateAndStartThreadPool(size_t num_parallel_jobs) {
1965 base::ThreadPoolInstance::Create("TestLauncher");
1966 base::ThreadPoolInstance::Get()->Start({num_parallel_jobs});
1967 }
1968
1969 void TestLauncher::CombinePositiveTestFilters(
1970 std::vector<std::string> filter_a,
1971 std::vector<std::string> filter_b) {
1972 has_at_least_one_positive_filter_ = !filter_a.empty() || !filter_b.empty();
1973 if (!has_at_least_one_positive_filter_) {
1974 return;
1975 }
1976 // If two positive filters are present, only run tests that match a pattern
1977 // in both filters.
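  // Note that the intersection is materialized as a list of full test names
  // rather than kept as patterns.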
1978 if (!filter_a.empty() && !filter_b.empty()) {
1979 for (const auto& i : tests_) {
1980 std::string test_name = i.GetFullName();
1981 bool found_a = false;
1982 bool found_b = false;
1983 for (const auto& k : filter_a) {
1984 found_a = found_a || MatchPattern(test_name, k);
1985 }
1986 for (const auto& k : filter_b) {
1987 found_b = found_b || MatchPattern(test_name, k);
1988 }
1989 if (found_a && found_b) {
1990 positive_test_filter_.push_back(test_name);
1991 }
1992 }
1993 } else if (!filter_a.empty()) {
1994 positive_test_filter_ = std::move(filter_a);
1995 } else {
1996 positive_test_filter_ = std::move(filter_b);
1997 }
1998 }
1999
2000 bool TestLauncher::ShouldRunInCurrentShard(
2001 std::string_view prefix_stripped_name) const {
2002 CHECK(!StartsWith(prefix_stripped_name, kPreTestPrefix));
2003 CHECK(!StartsWith(prefix_stripped_name, kDisabledTestPrefix));
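  // Hash the prefix-stripped name so that a test and its PRE_/DISABLED_
  // variants land on the same shard (see the bucketing note in CollectTests).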
2004 return PersistentHash(prefix_stripped_name) % total_shards_ ==
2005 static_cast<uint32_t>(shard_index_);
2006 }
2007
2008 std::vector<std::string> TestLauncher::CollectTests() {
2009 std::vector<std::string> test_names;
2010   // To support RTS (regression test selection), which may specify 100,000 or
2011   // more exact gtest filters, first split the filter into exact and wildcard
2012   // filters so that exact filters can be matched quickly.
2013 std::vector<StringPiece> positive_wildcards_filter;
2014 std::unordered_set<StringPiece> positive_exact_filter;
2015 positive_exact_filter.reserve(positive_test_filter_.size());
2016 std::unordered_set<std::string> enforced_positive_tests;
2017 for (const std::string& filter : positive_test_filter_) {
2018 if (filter.find('*') != std::string::npos) {
2019 positive_wildcards_filter.push_back(filter);
2020 } else {
2021 positive_exact_filter.insert(filter);
2022 }
2023 }
2024
2025 std::vector<StringPiece> negative_wildcards_filter;
2026 std::unordered_set<StringPiece> negative_exact_filter;
2027 negative_exact_filter.reserve(negative_test_filter_.size());
2028 for (const std::string& filter : negative_test_filter_) {
2029 if (filter.find('*') != std::string::npos) {
2030 negative_wildcards_filter.push_back(filter);
2031 } else {
2032 negative_exact_filter.insert(filter);
2033 }
2034 }
2035
2036 for (const TestInfo& test_info : tests_) {
2037 std::string test_name = test_info.GetFullName();
2038
2039 std::string prefix_stripped_name = test_info.GetPrefixStrippedName();
2040
2041 // Skip the test that doesn't match the filter (if given).
2042 if (has_at_least_one_positive_filter_) {
2043 bool found = positive_exact_filter.find(test_name) !=
2044 positive_exact_filter.end() ||
2045 positive_exact_filter.find(prefix_stripped_name) !=
2046 positive_exact_filter.end();
2047 if (found && enforce_exact_postive_filter_) {
2048 enforced_positive_tests.insert(prefix_stripped_name);
2049 }
2050 if (!found) {
2051 for (const StringPiece& filter : positive_wildcards_filter) {
2052 if (MatchPattern(test_name, filter) ||
2053 MatchPattern(prefix_stripped_name, filter)) {
2054 found = true;
2055 break;
2056 }
2057 }
2058 }
2059
2060 if (!found)
2061 continue;
2062 }
2063
2064 if (negative_exact_filter.find(test_name) != negative_exact_filter.end() ||
2065 negative_exact_filter.find(prefix_stripped_name) !=
2066 negative_exact_filter.end()) {
2067 continue;
2068 }
2069
2070 bool excluded = false;
2071 for (const StringPiece& filter : negative_wildcards_filter) {
2072 if (MatchPattern(test_name, filter) ||
2073 MatchPattern(prefix_stripped_name, filter)) {
2074 excluded = true;
2075 break;
2076 }
2077 }
2078 if (excluded)
2079 continue;
2080
2081 // Tests with the name XYZ will cause tests with the name PRE_XYZ to run. We
2082 // should bucket all of these tests together.
2083 if (!ShouldRunInCurrentShard(prefix_stripped_name)) {
2084 continue;
2085 }
2086
2087 // Report test locations after applying all filters, so that we report test
2088 // locations only for those tests that were run as part of this shard.
2089 results_tracker_.AddTestLocation(test_name, test_info.file(),
2090 test_info.line());
2091
2092 if (!test_info.pre_test()) {
2093 // Only a subset of tests that are run require placeholders -- namely,
2094 // those that will output results. Note that the results for PRE_XYZ will
2095 // be merged into XYZ's results if the former fails, so we don't need a
2096 // placeholder for it.
2097 results_tracker_.AddTestPlaceholder(test_name);
2098 }
2099
2100 test_names.push_back(test_name);
2101 }
2102
2103 // If `kEnforceExactPositiveFilter` is set, all test cases listed in the
2104 // exact positive filter for the current shard should exist in the
2105 // `enforced_positive_tests`. Otherwise, print the missing cases and fail
2106 // loudly.
2107 if (enforce_exact_postive_filter_) {
2108 bool found_exact_positive_filter_not_enforced = false;
2109 for (const auto& filter : positive_exact_filter) {
2110 if (!ShouldRunInCurrentShard(filter) ||
2111 Contains(enforced_positive_tests, std::string(filter))) {
2112 continue;
2113 }
2114 if (!found_exact_positive_filter_not_enforced) {
2115 LOG(ERROR) << "Found exact positive filter not enforced:";
2116 found_exact_positive_filter_not_enforced = true;
2117 }
2118 LOG(ERROR) << filter;
2119 }
2120 CHECK(!found_exact_positive_filter_not_enforced);
2121 }
2122
2123 return test_names;
2124 }
2125
2126 void TestLauncher::RunTests() {
2127 std::vector<std::string> original_test_names = CollectTests();
2128
2129 std::vector<std::string> test_names;
2130 for (int i = 0; i < repeats_per_iteration_; ++i) {
2131 test_names.insert(test_names.end(), original_test_names.begin(),
2132 original_test_names.end());
2133 }
2134
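  // Threshold for aborting the run early: at least 20 badly broken tests
  // (e.g. timeouts), or 10% of the suite, whichever is larger.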
2135 broken_threshold_ = std::max(static_cast<size_t>(20), tests_.size() / 10);
2136
2137 test_started_count_ = test_names.size();
2138
2139 // If there are no matching tests, warn and notify of any matches against
2140 // *<filter>*.
2141 if (test_started_count_ == 0) {
2142 PrintFuzzyMatchingTestNames();
2143 fprintf(stdout, "WARNING: No matching tests to run.\n");
2144 fflush(stdout);
2145 }
2146
2147 // Save an early test summary in case the launcher crashes or gets killed.
2148 results_tracker_.GeneratePlaceholderIteration();
2149 MaybeSaveSummaryAsJSON({"EARLY_SUMMARY"});
2150
2151   // If we are repeating tests, set batch size to 1 to ensure that batch size
2152   // does not interfere with repeats (unit tests use the filter to form batches
2153   // and can't run the same test twice in the same batch).
2154 size_t batch_size =
2155 repeats_per_iteration_ > 1 ? 1U : launcher_delegate_->GetBatchSize();
2156
2157 TestRunner test_runner(this, parallel_jobs_, batch_size);
2158 test_runner.Run(test_names);
2159 }
2160
2161 void TestLauncher::PrintFuzzyMatchingTestNames() {
2162 for (auto filter : positive_test_filter_) {
2163 if (filter.empty())
2164 continue;
2165 std::string almost_filter;
2166 if (filter.front() != '*')
2167 almost_filter += '*';
2168 almost_filter += filter;
2169 if (filter.back() != '*')
2170 almost_filter += '*';
2171
2172 for (const TestInfo& test_info : tests_) {
2173 std::string test_name = test_info.GetFullName();
2174 std::string prefix_stripped_name = test_info.GetPrefixStrippedName();
2175 if (MatchPattern(test_name, almost_filter) ||
2176 MatchPattern(prefix_stripped_name, almost_filter)) {
2177 fprintf(stdout, "Filter \"%s\" would have matched: %s\n",
2178 almost_filter.c_str(), test_name.c_str());
2179 fflush(stdout);
2180 }
2181 }
2182 }
2183 }
2184
2185 bool TestLauncher::RunRetryTests() {
2186 while (!tests_to_retry_.empty() && retries_left_ > 0) {
2187 // Retry all tests that depend on a failing test.
2188 std::vector<std::string> test_names;
2189 for (const TestInfo& test_info : tests_) {
2190 if (base::Contains(tests_to_retry_, test_info.GetPrefixStrippedName()))
2191 test_names.push_back(test_info.GetFullName());
2192 }
2193 tests_to_retry_.clear();
2194
2195 size_t retry_started_count = test_names.size();
2196 test_started_count_ += retry_started_count;
2197
2198 // Only invoke RunLoop if there are any tasks to run.
2199 if (retry_started_count == 0)
2200 return false;
2201
2202 fprintf(stdout, "Retrying %zu test%s (retry #%zu)\n", retry_started_count,
2203 retry_started_count > 1 ? "s" : "", retry_limit_ - retries_left_);
2204 fflush(stdout);
2205
2206 --retries_left_;
2207 TestRunner test_runner(this);
2208 test_runner.Run(test_names);
2209 }
2210 return tests_to_retry_.empty();
2211 }
2212
2213 void TestLauncher::OnTestIterationStart() {
2214 test_started_count_ = 0;
2215 test_finished_count_ = 0;
2216 test_success_count_ = 0;
2217 test_broken_count_ = 0;
2218 tests_to_retry_.clear();
2219 results_tracker_.OnTestIterationStarting();
2220 }
2221
2222 #if BUILDFLAG(IS_POSIX)
2223 // I/O watcher for the reading end of the self-pipe above.
2224 // Terminates any launched child processes and exits the process.
2225 void TestLauncher::OnShutdownPipeReadable() {
2226 fprintf(stdout, "\nCaught signal. Killing spawned test processes...\n");
2227 fflush(stdout);
2228
2229 KillSpawnedTestProcesses();
2230
2231 MaybeSaveSummaryAsJSON({"CAUGHT_TERMINATION_SIGNAL"});
2232
2233 // The signal would normally kill the process, so exit now.
2234 _exit(1);
2235 }
2236 #endif // BUILDFLAG(IS_POSIX)
2237
2238 void TestLauncher::MaybeSaveSummaryAsJSON(
2239 const std::vector<std::string>& additional_tags) {
2240 if (!summary_path_.empty()) {
2241 if (!results_tracker_.SaveSummaryAsJSON(summary_path_, additional_tags)) {
2242 LOG(ERROR) << "Failed to save test launcher output summary.";
2243 }
2244 }
2245 if (!trace_path_.empty()) {
2246 if (!GetTestLauncherTracer()->Dump(trace_path_)) {
2247 LOG(ERROR) << "Failed to save test launcher trace.";
2248 }
2249 }
2250 }
2251
2252 void TestLauncher::OnTestIterationFinished() {
2253 TestResultsTracker::TestStatusMap tests_by_status(
2254 results_tracker_.GetTestStatusMapForCurrentIteration());
2255 if (!tests_by_status[TestResult::TEST_UNKNOWN].empty())
2256 results_tracker_.AddGlobalTag(kUnreliableResultsTag);
2257
2258 results_tracker_.PrintSummaryOfCurrentIteration();
2259 }
2260
2261 void TestLauncher::OnOutputTimeout() {
2262 DCHECK(thread_checker_.CalledOnValidThread());
2263
2264 AutoLock lock(*GetLiveProcessesLock());
2265
2266 fprintf(stdout, "Still waiting for the following processes to finish:\n");
2267
2268 for (const auto& pair : *GetLiveProcesses()) {
2269 #if BUILDFLAG(IS_WIN)
2270 fwprintf(stdout, L"\t%s\n", pair.second.GetCommandLineString().c_str());
2271 #else
2272 fprintf(stdout, "\t%s\n", pair.second.GetCommandLineString().c_str());
2273 #endif
2274 }
2275
2276 fflush(stdout);
2277
2278 if (CommandLine::ForCurrentProcess()->HasSwitch(
2279 switches::kTestLauncherPrintTimestamps)) {
2280 ::logging::ScopedLoggingSettings scoped_logging_setting;
2281 ::logging::SetLogItems(true, true, true, true);
2282 LOG(INFO) << "Waiting_timestamp";
2283 }
2284 // Arm the timer again - otherwise it would fire only once.
2285 watchdog_timer_.Reset();
2286 }
2287
2288 size_t NumParallelJobs(unsigned int cores_per_job) {
2289 const CommandLine* command_line = CommandLine::ForCurrentProcess();
2290 if (command_line->HasSwitch(switches::kTestLauncherJobs)) {
2291 // If the number of test launcher jobs was specified, return that number.
2292 size_t jobs = 0U;
2293
2294 if (!StringToSizeT(
2295 command_line->GetSwitchValueASCII(switches::kTestLauncherJobs),
2296 &jobs) ||
2297 !jobs) {
2298 LOG(ERROR) << "Invalid value for " << switches::kTestLauncherJobs;
2299 return 0U;
2300 }
2301 return jobs;
2302 }
2303 if (!BotModeEnabled(command_line) &&
2304 (command_line->HasSwitch(kGTestFilterFlag) ||
2305 command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
2306 // Do not run jobs in parallel by default if we are running a subset of
2307 // the tests and if bot mode is off.
2308 return 1U;
2309 }
2310
2311 #if BUILDFLAG(IS_WIN)
2312 // Use processors in all groups (Windows splits more than 64 logical
2313 // processors into groups).
2314 size_t cores = base::checked_cast<size_t>(
2315 ::GetActiveProcessorCount(ALL_PROCESSOR_GROUPS));
2316 #else
2317 size_t cores = base::checked_cast<size_t>(SysInfo::NumberOfProcessors());
2318 #if BUILDFLAG(IS_MAC)
2319 // This is necessary to allow tests to call SetCpuSecurityMitigationsEnabled()
2320 // despite NumberOfProcessors() having already been called in the process.
2321 SysInfo::ResetCpuSecurityMitigationsEnabledForTesting();
2322 #endif // BUILDFLAG(IS_MAC)
2323 #endif // BUILDFLAG(IS_WIN)
2324
2325 #if BUILDFLAG(IS_IOS) && TARGET_OS_SIMULATOR
2326   // If we are targeting the simulator, double the core count used to compute
2327   // the number of jobs. This is necessary because the startup of each
2328   // process is slow, so using 2x empirically approaches the total machine
2329   // utilization.
2330 cores *= 2;
2331 #endif
2332 return std::max(size_t(1), cores / cores_per_job);
2333 }
2334
2335 std::string GetTestOutputSnippet(const TestResult& result,
2336 const std::string& full_output) {
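  // A snippet spans from this test's "[ RUN      ] " line to its terminating
  // "[       OK ] ", "[  SKIPPED ] " or "[  FAILED  ] " line; if the test
  // never finished, it extends to the next "[ RUN      ]" (or to the end of
  // the output).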
2337   size_t run_pos = full_output.find(std::string("[ RUN      ] ") +
2338 result.full_name);
2339 if (run_pos == std::string::npos)
2340 return std::string();
2341
2342   size_t end_pos = full_output.find(std::string("[  FAILED  ] ") +
2343 result.full_name,
2344 run_pos);
2345 // Only clip the snippet to the "OK" message if the test really
2346 // succeeded or was skipped. It still might have e.g. crashed
2347 // after printing it.
2348 if (end_pos == std::string::npos) {
2349 if (result.status == TestResult::TEST_SUCCESS) {
2350       end_pos = full_output.find(std::string("[       OK ] ") +
2351 result.full_name,
2352 run_pos);
2353
2354       // Also handle SKIPPED next to SUCCESS because the GTest XML output
2355       // doesn't distinguish between SKIPPED and SUCCESS.
2356 if (end_pos == std::string::npos)
2357 end_pos = full_output.find(
2358             std::string("[  SKIPPED ] ") + result.full_name, run_pos);
2359 } else {
2360 // If test is not successful, include all output until subsequent test.
2361       end_pos = full_output.find(std::string("[ RUN      ]"), run_pos + 1);
2362 if (end_pos != std::string::npos)
2363 end_pos--;
2364 }
2365 }
2366 if (end_pos != std::string::npos) {
2367 size_t newline_pos = full_output.find("\n", end_pos);
2368 if (newline_pos != std::string::npos)
2369 end_pos = newline_pos + 1;
2370 }
2371
2372 std::string snippet(full_output.substr(run_pos));
2373 if (end_pos != std::string::npos)
2374 snippet = full_output.substr(run_pos, end_pos - run_pos);
2375
2376 return snippet;
2377 }
2378
2379 std::string TruncateSnippetFocused(const base::StringPiece snippet,
2380 size_t byte_limit) {
2381 // Find the start of anything that looks like a fatal log message.
2382 // We want to preferentially preserve these from truncation as we
2383 // run extraction of fatal test errors from snippets in result_adapter
2384 // to populate failure reasons in ResultDB. It is also convenient for
2385 // the user to see them.
2386 // Refer to LogMessage::Init in base/logging[_platform].cc for patterns.
2387 size_t fatal_message_pos =
2388 std::min(snippet.find("FATAL:"), snippet.find("FATAL "));
2389
2390 size_t fatal_message_start = 0;
2391 size_t fatal_message_end = 0;
2392 if (fatal_message_pos != std::string::npos) {
2393 // Find the line-endings before and after the fatal message.
2394 size_t start_pos = snippet.rfind("\n", fatal_message_pos);
2395 if (start_pos != std::string::npos) {
2396 fatal_message_start = start_pos;
2397 }
2398 size_t end_pos = snippet.find("\n", fatal_message_pos);
2399 if (end_pos != std::string::npos) {
2400 // Include the new-line character.
2401 fatal_message_end = end_pos + 1;
2402 } else {
2403 fatal_message_end = snippet.length();
2404 }
2405 }
2406 // Limit fatal message length to half the snippet byte quota. This ensures
2407 // we have space for some context at the beginning and end of the snippet.
2408 fatal_message_end =
2409 std::min(fatal_message_end, fatal_message_start + (byte_limit / 2));
2410
2411 // Distribute remaining bytes between start and end of snippet.
2412 // The split is either even, or if one is small enough to be displayed
2413 // without truncation, it gets displayed in full and the other split gets
2414 // the remaining bytes.
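  // For example (hypothetical numbers): with a 1000-byte limit and a 300-byte
  // fatal message, roughly 350 bytes are kept from before the message and 350
  // bytes from after it.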
2415 size_t remaining_bytes =
2416 byte_limit - (fatal_message_end - fatal_message_start);
2417 size_t start_split_bytes;
2418 size_t end_split_bytes;
2419 if (fatal_message_start < remaining_bytes / 2) {
2420 start_split_bytes = fatal_message_start;
2421 end_split_bytes = remaining_bytes - fatal_message_start;
2422 } else if ((snippet.length() - fatal_message_end) < remaining_bytes / 2) {
2423 start_split_bytes =
2424 remaining_bytes - (snippet.length() - fatal_message_end);
2425 end_split_bytes = (snippet.length() - fatal_message_end);
2426 } else {
2427 start_split_bytes = remaining_bytes / 2;
2428 end_split_bytes = remaining_bytes - start_split_bytes;
2429 }
2430 return base::StrCat(
2431 {TruncateSnippet(snippet.substr(0, fatal_message_start),
2432 start_split_bytes),
2433 snippet.substr(fatal_message_start,
2434 fatal_message_end - fatal_message_start),
2435 TruncateSnippet(snippet.substr(fatal_message_end), end_split_bytes)});
2436 }
2437
2438 } // namespace base
2439