// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/test/launcher/test_launcher.h"

#include <stdio.h>

#include <algorithm>
#include <map>
#include <random>
#include <unordered_map>
#include <unordered_set>
#include <utility>

#include "base/at_exit.h"
#include "base/clang_profiling_buildflags.h"
#include "base/command_line.h"
#include "base/containers/adapters.h"
#include "base/containers/contains.h"
#include "base/environment.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/format_macros.h"
#include "base/functional/bind.h"
#include "base/hash/hash.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ref.h"
#include "base/numerics/safe_conversions.h"
#include "base/process/kill.h"
#include "base/process/launch.h"
#include "base/ranges/algorithm.h"
#include "base/run_loop.h"
#include "base/strings/pattern.h"
#include "base/strings/strcat.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringize_macros.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/system/sys_info.h"
#include "base/task/post_job.h"
#include "base/task/single_thread_task_runner.h"
#include "base/task/thread_pool.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/test/gtest_util.h"
#include "base/test/gtest_xml_util.h"
#include "base/test/launcher/test_launcher_tracer.h"
#include "base/test/launcher/test_results_tracker.h"
#include "base/test/scoped_logging_settings.h"
#include "base/test/test_file_util.h"
#include "base/test/test_switches.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/libxml/chromium/libxml_utils.h"

#if BUILDFLAG(IS_POSIX)
#include <fcntl.h>

#include "base/files/file_descriptor_watcher_posix.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

#if BUILDFLAG(IS_WIN)
#include "base/strings/string_util_win.h"

#include <windows.h>

// To avoid conflicts with the macro from the Windows SDK...
#undef GetCommandLine
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <lib/fdio/namespace.h>
#include <lib/zx/job.h>
#include <lib/zx/time.h>
#include "base/atomic_sequence_num.h"
#include "base/fuchsia/default_job.h"
#include "base/fuchsia/file_utils.h"
#include "base/fuchsia/fuchsia_logging.h"
#endif

namespace base {

// See
// https://groups.google.com/a/chromium.org/d/msg/chromium-dev/nkdTP7sstSc/uT3FaE_sgkAJ
using ::operator<<;

// The environment variable name for the total number of test shards.
const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
// The environment variable name for the test shard index.
const char kTestShardIndex[] = "GTEST_SHARD_INDEX";

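// For example (hypothetical values), a CI bot running shard 1 of 4 would
// export GTEST_TOTAL_SHARDS=4 and GTEST_SHARD_INDEX=1. The launcher consumes
// (and unsets) both variables, so child processes do not additionally apply
// gtest's own built-in sharding on top of the launcher's.
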
// Prefix indicating a test that has to run prior to its main test.
const char kPreTestPrefix[] = "PRE_";

// Prefix indicating a test is disabled and will not run unless explicitly
// specified.
const char kDisabledTestPrefix[] = "DISABLED_";

ResultWatcher::ResultWatcher(FilePath result_file, size_t num_tests)
    : result_file_(std::move(result_file)), num_tests_(num_tests) {}

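// A worked example of the polling schedule below (hypothetical numbers): with
// num_tests_ = 10 and timeout_per_test = 45 s, the whole batch must finish
// within 450 s. Each wait lasts at most 45 s past the most recently observed
// test completion; a poll that observes no new completion within that window
// returns a non-positive delta and the batch is treated as hung (after a
// short grace period for the process to exit on its own).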
bool ResultWatcher::PollUntilDone(TimeDelta timeout_per_test) {
  CHECK(timeout_per_test.is_positive());
  TimeTicks batch_deadline = TimeTicks::Now() + num_tests_ * timeout_per_test;
  TimeDelta time_to_next_check = timeout_per_test;
  do {
    if (WaitWithTimeout(time_to_next_check)) {
      return true;
    }
    time_to_next_check = PollOnce(timeout_per_test);
  } while (TimeTicks::Now() < batch_deadline &&
           time_to_next_check.is_positive());
  // The process may have exited or is about to exit. Give the process a grace
  // period to exit on its own.
  return WaitWithTimeout(TestTimeouts::tiny_timeout());
}

TimeDelta ResultWatcher::PollOnce(TimeDelta timeout_per_test) {
  std::vector<TestResult> test_results;
  // If the result watcher is unlucky enough to read the results while the
  // runner process is writing an update, it is possible to read an incomplete
  // XML entry, in which case `ProcessGTestOutput` will return false.
  if (!ProcessGTestOutput(result_file_, &test_results, nullptr)) {
    return TestTimeouts::tiny_timeout();
  }
  Time latest_completion = LatestCompletionTimestamp(test_results);
  // Not a single test completed before the timeout; fail.
  if (latest_completion.is_null()) {
    return TimeDelta();
  }
  // The gtest result writer gets timestamps from `Time::Now`.
  TimeDelta time_since_latest_completion = Time::Now() - latest_completion;
  // This heuristic attempts to prevent unrelated clock changes between the
  // latest write and read from being falsely identified as a test timeout.
  // For example, daylight savings time starting or ending can add an
  // artificial delta of +1 or -1 hour to `time_since_latest_completion`.
  if (time_since_latest_completion.is_negative() ||
      time_since_latest_completion > kDaylightSavingsThreshold) {
    return timeout_per_test;
  }
  // Expect another test to complete no later than `timeout_per_test` after
  // the latest completion.
  return timeout_per_test - time_since_latest_completion;
}

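// Example (hypothetical timestamps): if the parsed results contain tests that
// completed at 12:00:01 and 12:00:04 plus one still-running entry, the
// function below returns 12:00:04, i.e. the start timestamp plus elapsed time
// of the most recent completed test.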
Time ResultWatcher::LatestCompletionTimestamp(
    const std::vector<TestResult>& test_results) {
  CHECK_LE(test_results.size(), num_tests_);
  // Since the result file is append-only, timestamps should already be in
  // ascending order.
  for (const TestResult& result : Reversed(test_results)) {
    if (result.completed()) {
      Time test_start = result.timestamp.value_or(Time());
      return test_start + result.elapsed_time;
    }
  }
  return Time();
}

// Watch results generated by a child test process. Wait for the child process
// to exit between result checks.
class ProcessResultWatcher : public ResultWatcher {
 public:
  ProcessResultWatcher(FilePath result_file, size_t num_tests, Process& process)
      : ResultWatcher(result_file, num_tests), process_(process) {}

  // Get the exit code of the process, or -1 if the process has not exited yet.
  int GetExitCode();

  bool WaitWithTimeout(TimeDelta timeout) override;

 private:
  const raw_ref<Process> process_;
  int exit_code_ = -1;
};

int ProcessResultWatcher::GetExitCode() {
  return exit_code_;
}

bool ProcessResultWatcher::WaitWithTimeout(TimeDelta timeout) {
  return process_->WaitForExitWithTimeout(timeout, &exit_code_);
}

namespace {

// Global tag for test runs where the results are unreliable for any reason.
const char kUnreliableResultsTag[] = "UNRELIABLE_RESULTS";

// Maximum time of no output after which we print the list of processes still
// running. This deliberately doesn't use TestTimeouts (which is otherwise
// a recommended solution), because they can be increased. This would defeat
// the purpose of this timeout, which is 1) to avoid buildbot "no output for
// X seconds" timeout killing the process 2) help communicate status of
// the test launcher to people looking at the output (no output for a long
// time is mysterious and gives no info about what is happening) 3) help
// debugging in case the process hangs anyway.
constexpr TimeDelta kOutputTimeout = Seconds(15);

// Limit of output snippet lines when printing to stdout.
// Avoids flooding the logs with an amount of output that gums up
// the infrastructure.
const size_t kOutputSnippetLinesLimit = 5000;

// Limit of output snippet size. Exceeding this limit
// results in truncating the output and failing the test.
const size_t kOutputSnippetBytesLimit = 300 * 1024;

// Limit of seed values for gtest shuffling. Arbitrary, but based on
// gtest's similarly arbitrary choice.
const uint32_t kRandomSeedUpperBound = 100000;

// Set of live launched test processes with corresponding lock (it is allowed
// for callers to launch processes on different threads).
Lock* GetLiveProcessesLock() {
  static auto* lock = new Lock;
  return lock;
}

std::map<ProcessHandle, CommandLine>* GetLiveProcesses() {
  static auto* map = new std::map<ProcessHandle, CommandLine>;
  return map;
}

// Performance trace generator.
TestLauncherTracer* GetTestLauncherTracer() {
  static auto* tracer = new TestLauncherTracer;
  return tracer;
}

#if BUILDFLAG(IS_FUCHSIA)
zx_status_t WaitForJobExit(const zx::job& job) {
  zx::time deadline =
      zx::deadline_after(zx::duration(kOutputTimeout.ToZxDuration()));
  zx_signals_t to_wait_for = ZX_JOB_NO_JOBS | ZX_JOB_NO_PROCESSES;
  while (to_wait_for) {
    zx_signals_t observed = 0;
    zx_status_t status = job.wait_one(to_wait_for, deadline, &observed);
    if (status != ZX_OK)
      return status;
    to_wait_for &= ~observed;
  }
  return ZX_OK;
}
#endif  // BUILDFLAG(IS_FUCHSIA)

#if BUILDFLAG(IS_POSIX)
// Self-pipe that makes it possible to do complex shutdown handling
// outside of the signal handler.
int g_shutdown_pipe[2] = { -1, -1 };

void ShutdownPipeSignalHandler(int signal) {
  HANDLE_EINTR(write(g_shutdown_pipe[1], "q", 1));
}

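// This is the classic self-pipe pattern: write() is one of the few
// async-signal-safe calls, so the handler above only writes a byte, and the
// FileDescriptorWatcher set up in TestLauncher::Run() notices the readable
// pipe and performs the actual shutdown work on a regular thread.
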
void KillSpawnedTestProcesses() {
  // Keep the lock until exiting the process to prevent further processes
  // from being spawned.
  AutoLock lock(*GetLiveProcessesLock());

  fprintf(stdout, "Sending SIGTERM to %zu child processes... ",
          GetLiveProcesses()->size());
  fflush(stdout);

  for (const auto& pair : *GetLiveProcesses()) {
    // Send the signal to entire process group.
    kill((-1) * (pair.first), SIGTERM);
  }

  fprintf(stdout,
          "done.\nGiving processes a chance to terminate cleanly... ");
  fflush(stdout);

  PlatformThread::Sleep(Milliseconds(500));

  fprintf(stdout, "done.\n");
  fflush(stdout);

  fprintf(stdout, "Sending SIGKILL to %zu child processes... ",
          GetLiveProcesses()->size());
  fflush(stdout);

  for (const auto& pair : *GetLiveProcesses()) {
    // Send the signal to entire process group.
    kill((-1) * (pair.first), SIGKILL);
  }

  fprintf(stdout, "done.\n");
  fflush(stdout);
}
#endif  // BUILDFLAG(IS_POSIX)

// Parses the environment variable |var| as an Int32. If it is unset, returns
// true. If it is set, unsets it, then converts it to Int32 before
// returning it in |result|. Returns true on success.
bool TakeInt32FromEnvironment(const char* const var, int32_t* result) {
  std::unique_ptr<Environment> env(Environment::Create());
  std::string str_val;

  if (!env->GetVar(var, &str_val))
    return true;

  if (!env->UnSetVar(var)) {
    LOG(ERROR) << "Invalid environment: we could not unset " << var << ".\n";
    return false;
  }

  if (!StringToInt(str_val, result)) {
    LOG(ERROR) << "Invalid environment: " << var << " is not an integer.\n";
    return false;
  }

  return true;
}

// Unsets the environment variable |name| and returns true on success.
// Also returns true if the variable just doesn't exist.
bool UnsetEnvironmentVariableIfExists(const std::string& name) {
  std::unique_ptr<Environment> env(Environment::Create());
  std::string str_val;
  if (!env->GetVar(name, &str_val))
    return true;
  return env->UnSetVar(name);
}

// Returns true if bot mode has been requested, i.e. defaults optimized
// for continuous integration bots. This way developers don't have to remember
// special command-line flags.
bool BotModeEnabled(const CommandLine* command_line) {
  std::unique_ptr<Environment> env(Environment::Create());
  return command_line->HasSwitch(switches::kTestLauncherBotMode) ||
         env->HasVar("CHROMIUM_TEST_LAUNCHER_BOT_MODE");
}

// Returns the command line after gtest-specific processing
// and applying |wrapper|.
CommandLine PrepareCommandLineForGTest(const CommandLine& command_line,
                                       const std::string& wrapper,
                                       const size_t retries_left) {
  CommandLine new_command_line(command_line.GetProgram());
  CommandLine::SwitchMap switches = command_line.GetSwitches();

  // Handled by the launcher process.
  switches.erase(kGTestRepeatFlag);
  switches.erase(kIsolatedScriptTestRepeatFlag);

  // Don't try to write the final XML report in child processes.
  switches.erase(kGTestOutputFlag);

#if BUILDFLAG(IS_IOS)
  // We only need the xctest flag for the parent process. Passing it to
  // child processes will cause the tests not to run, so remove it.
  switches.erase(switches::kEnableRunIOSUnittestsWithXCTest);
#endif

  if (switches.find(switches::kTestLauncherRetriesLeft) == switches.end()) {
    switches[switches::kTestLauncherRetriesLeft] =
#if BUILDFLAG(IS_WIN)
        base::NumberToWString(
#else
        base::NumberToString(
#endif
            retries_left);
  }

  for (CommandLine::SwitchMap::const_iterator iter = switches.begin();
       iter != switches.end(); ++iter) {
    new_command_line.AppendSwitchNative((*iter).first, (*iter).second);
  }

  // Prepend wrapper after the last CommandLine quasi-copy operation.
  // CommandLine does not really support removing switches well, and trying to
  // do that on a CommandLine with a wrapper is known to break.
  // TODO(phajdan.jr): Give it a try to support CommandLine removing switches.
#if BUILDFLAG(IS_WIN)
  new_command_line.PrependWrapper(UTF8ToWide(wrapper));
#else
  new_command_line.PrependWrapper(wrapper);
#endif

  return new_command_line;
}

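// For instance (hypothetical invocation), given the launcher command line
//   out/Debug/base_unittests --gtest_repeat=3 --gtest_output=xml:out.xml
// and wrapper "xvfb-run", a child command line would look roughly like
//   xvfb-run out/Debug/base_unittests --test-launcher-retries-left=2
// with the repeat and output switches stripped, since the launcher itself
// handles repetition and writes the final report.
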
// Launches a child process using |command_line|. If a test is still running
// after its timeout, the child process is terminated and |*was_timeout| is
// set to true. Returns the exit code of the process.
int LaunchChildTestProcessWithOptions(const CommandLine& command_line,
                                      const LaunchOptions& options,
                                      int flags,
                                      const FilePath& result_file,
                                      TimeDelta timeout_per_test,
                                      size_t num_tests,
                                      TestLauncherDelegate* delegate,
                                      bool* was_timeout) {
#if BUILDFLAG(IS_POSIX)
  // Make sure an option we rely on is present - see LaunchChildGTestProcess.
  DCHECK(options.new_process_group);
#endif

  LaunchOptions new_options(options);

#if BUILDFLAG(IS_WIN)
  DCHECK(!new_options.job_handle);

  win::ScopedHandle job_handle;
  if (flags & TestLauncher::USE_JOB_OBJECTS) {
    job_handle.Set(CreateJobObject(NULL, NULL));
    if (!job_handle.is_valid()) {
      LOG(ERROR) << "Could not create JobObject.";
      return -1;
    }

    DWORD job_flags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;

    if (!SetJobObjectLimitFlags(job_handle.get(), job_flags)) {
      LOG(ERROR) << "Could not SetJobObjectLimitFlags.";
      return -1;
    }

    new_options.job_handle = job_handle.get();
  }
#elif BUILDFLAG(IS_FUCHSIA)
  DCHECK(!new_options.job_handle);

  // Set the clone policy, deliberately omitting FDIO_SPAWN_CLONE_NAMESPACE so
  // that we can install a different /data.
  new_options.spawn_flags = FDIO_SPAWN_CLONE_STDIO | FDIO_SPAWN_CLONE_JOB;

  const base::FilePath kDataPath(base::kPersistedDataDirectoryPath);
  const base::FilePath kCachePath(base::kPersistedCacheDirectoryPath);

  // Clone all namespace entries from the current process, except /data and
  // /cache, which are overridden below.
  fdio_flat_namespace_t* flat_namespace = nullptr;
  zx_status_t result = fdio_ns_export_root(&flat_namespace);
  ZX_CHECK(ZX_OK == result, result) << "fdio_ns_export_root";
  for (size_t i = 0; i < flat_namespace->count; ++i) {
    base::FilePath path(flat_namespace->path[i]);
    if (path == kDataPath || path == kCachePath) {
      result = zx_handle_close(flat_namespace->handle[i]);
      ZX_CHECK(ZX_OK == result, result) << "zx_handle_close";
    } else {
      new_options.paths_to_transfer.push_back(
          {path, flat_namespace->handle[i]});
    }
  }
  free(flat_namespace);

  zx::job job_handle;
  result = zx::job::create(*GetDefaultJob(), 0, &job_handle);
  ZX_CHECK(ZX_OK == result, result) << "zx_job_create";
  new_options.job_handle = job_handle.get();

  // Give this test its own isolated /data directory by creating a new
  // temporary subdirectory under data (/data/test-$PID) and binding paths
  // under that to /data and /cache in the child process.
  // Persistent data storage is mapped to /cache rather than system-provided
  // cache storage, to avoid unexpected purges (see crbug.com/1242170).
  CHECK(base::PathExists(kDataPath));

  // Create the test subdirectory with a name that is unique to the child test
  // process (qualified by parent PID and an autoincrementing test process
  // index).
  static base::AtomicSequenceNumber child_launch_index;
  const base::FilePath child_data_path = kDataPath.AppendASCII(
      base::StringPrintf("test-%zu-%d", base::Process::Current().Pid(),
                         child_launch_index.GetNext()));
  CHECK(!base::DirectoryExists(child_data_path));
  CHECK(base::CreateDirectory(child_data_path));
  DCHECK(base::DirectoryExists(child_data_path));

  const base::FilePath test_data_dir(child_data_path.AppendASCII("data"));
  CHECK(base::CreateDirectory(test_data_dir));
  const base::FilePath test_cache_dir(child_data_path.AppendASCII("cache"));
  CHECK(base::CreateDirectory(test_cache_dir));

  // Transfer handles to the new directories as /data and /cache in the child
  // process' namespace.
  new_options.paths_to_transfer.push_back(
      {kDataPath,
       base::OpenDirectoryHandle(test_data_dir).TakeChannel().release()});
  new_options.paths_to_transfer.push_back(
      {kCachePath,
       base::OpenDirectoryHandle(test_cache_dir).TakeChannel().release()});
#endif  // BUILDFLAG(IS_FUCHSIA)

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  // To prevent accidental privilege sharing to an untrusted child, processes
  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
  // new child will be privileged and trusted.
  new_options.allow_new_privs = true;
#endif

  Process process;

  {
    // Note how we grab the lock before the process possibly gets created.
    // This ensures that when the lock is held, ALL the processes are registered
    // in the set.
    AutoLock lock(*GetLiveProcessesLock());

#if BUILDFLAG(IS_WIN)
    // Allow the handle used to capture stdout and stderr to be inherited by
    // the child. Note that this is done under GetLiveProcessesLock() to ensure
    // that only the desired child receives the handle.
    if (new_options.stdout_handle) {
      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT,
                             HANDLE_FLAG_INHERIT);
    }
#endif

    process = LaunchProcess(command_line, new_options);

#if BUILDFLAG(IS_WIN)
    // Revoke inheritance so that the handle isn't leaked into other children.
    // Note that this is done under GetLiveProcessesLock() to ensure that only
    // the desired child receives the handle.
    if (new_options.stdout_handle)
      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT, 0);
#endif

    if (!process.IsValid())
      return -1;

    // TODO(rvargas) crbug.com/417532: Don't store process handles.
    GetLiveProcesses()->insert(std::make_pair(process.Handle(), command_line));
  }

  int exit_code = 0;
  bool did_exit = false;

  {
    base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
    if (num_tests == 1) {
      did_exit = process.WaitForExitWithTimeout(timeout_per_test, &exit_code);
    } else {
      ProcessResultWatcher result_watcher(result_file, num_tests, process);
      did_exit = result_watcher.PollUntilDone(timeout_per_test);
      exit_code = result_watcher.GetExitCode();
    }
  }

  if (!did_exit) {
    if (delegate)
      delegate->OnTestTimedOut(command_line);

    *was_timeout = true;
    exit_code = -1;  // Set a non-zero exit code to signal a failure.

    {
      base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
      // Ensure that the process terminates.
      process.Terminate(-1, true);
    }
  }

#if BUILDFLAG(IS_FUCHSIA)
  zx_status_t wait_status = WaitForJobExit(job_handle);
  if (wait_status != ZX_OK) {
    LOG(ERROR) << "Batch leaked jobs or processes.";
    exit_code = -1;
  }
#endif  // BUILDFLAG(IS_FUCHSIA)

  {
    // Note how we grab the lock before issuing a possibly broad process kill.
    // Other code parts that grab the lock kill processes, so avoid trying
    // to do that twice and trigger all kinds of log messages.
    AutoLock lock(*GetLiveProcessesLock());

#if BUILDFLAG(IS_FUCHSIA)
    zx_status_t status = job_handle.kill();
    ZX_CHECK(status == ZX_OK, status);

    // Clean up the data directory.
    CHECK(DeletePathRecursively(child_data_path));
#elif BUILDFLAG(IS_POSIX)
    // It is not possible to waitpid() on any leaked sub-processes of the test
    // batch process, since those are not direct children of this process.
    // kill()ing the process-group will return a result indicating whether the
    // group was found (i.e. processes were still running in it) or not (i.e.
    // sub-processes had exited already). Unfortunately many tests (e.g. browser
    // tests) have processes exit asynchronously, so checking the kill() result
    // would report false failures.
    // Unconditionally kill the process group, regardless of the batch
    // exit-code, until a better solution is available.
    kill(-1 * process.Handle(), SIGKILL);
#endif  // BUILDFLAG(IS_POSIX)

    GetLiveProcesses()->erase(process.Handle());
  }

  return exit_code;
}

struct ChildProcessResults {
  // Total time for DoLaunchChildTestProcess to execute.
  TimeDelta elapsed_time;
  // If stdio is redirected, pass output file content.
  std::string output_file_contents;
  // True if the child process timed out.
  bool was_timeout = false;
  // Exit code of the child process.
  int exit_code;
  // Thread ID of the runner.
  PlatformThreadId thread_id;
  // The sequence number of the child test process executed. It's used instead
  // of the process id to distinguish processes, because the OS may reuse
  // process ids.
  int process_num;
};

// Returns the path to a temporary directory within |task_temp_dir| for the
// child process of index |child_index|, or an empty FilePath if per-child temp
// dirs are not supported.
FilePath CreateChildTempDirIfSupported(const FilePath& task_temp_dir,
                                       int child_index) {
  if (!TestLauncher::SupportsPerChildTempDirs())
    return FilePath();
  FilePath child_temp = task_temp_dir.AppendASCII(NumberToString(child_index));
  CHECK(CreateDirectoryAndGetError(child_temp, nullptr));
  return child_temp;
}

// Adds the platform-specific variable setting |temp_dir| as a process's
// temporary directory to |environment|.
void SetTemporaryDirectory(const FilePath& temp_dir,
                           EnvironmentMap* environment) {
#if BUILDFLAG(IS_WIN)
  environment->emplace(L"TMP", temp_dir.value());
#elif BUILDFLAG(IS_APPLE)
  environment->emplace("MAC_CHROMIUM_TMPDIR", temp_dir.value());
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  environment->emplace("TMPDIR", temp_dir.value());
#endif
}

// This launches the child test process, waits for it to complete,
// and returns child process results.
ChildProcessResults DoLaunchChildTestProcess(
    const CommandLine& command_line,
    const FilePath& process_temp_dir,
    const FilePath& result_file,
    TimeDelta timeout_per_test,
    size_t num_tests,
    const TestLauncher::LaunchOptions& test_launch_options,
    bool redirect_stdio,
    TestLauncherDelegate* delegate) {
  TimeTicks start_time = TimeTicks::Now();

  ChildProcessResults result;
  result.thread_id = PlatformThread::CurrentId();

  ScopedFILE output_file;
  FilePath output_filename;
  if (redirect_stdio) {
    output_file = CreateAndOpenTemporaryStream(&output_filename);
    CHECK(output_file);
#if BUILDFLAG(IS_WIN)
    // Paint the file so that it will be deleted when all handles are closed.
    if (!FILEToFile(output_file.get()).DeleteOnClose(true)) {
      PLOG(WARNING) << "Failed to mark " << output_filename.AsUTF8Unsafe()
                    << " for deletion on close";
    }
#endif
  }

  LaunchOptions options;

  // Tell the child process to use its designated temporary directory.
  if (!process_temp_dir.empty())
    SetTemporaryDirectory(process_temp_dir, &options.environment);
#if BUILDFLAG(IS_WIN)

  options.inherit_mode = test_launch_options.inherit_mode;
  options.handles_to_inherit = test_launch_options.handles_to_inherit;
  if (redirect_stdio) {
    HANDLE handle =
        reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(output_file.get())));
    CHECK_NE(INVALID_HANDLE_VALUE, handle);
    options.stdin_handle = INVALID_HANDLE_VALUE;
    options.stdout_handle = handle;
    options.stderr_handle = handle;
    // See LaunchOptions.stdout_handle comments for why this compares against
    // FILE_TYPE_CHAR.
    if (options.inherit_mode == base::LaunchOptions::Inherit::kSpecific &&
        GetFileType(handle) != FILE_TYPE_CHAR) {
      options.handles_to_inherit.push_back(handle);
    }
  }

#else  // if !BUILDFLAG(IS_WIN)

  options.fds_to_remap = test_launch_options.fds_to_remap;
  if (redirect_stdio) {
    int output_file_fd = fileno(output_file.get());
    CHECK_LE(0, output_file_fd);
    options.fds_to_remap.push_back(
        std::make_pair(output_file_fd, STDOUT_FILENO));
    options.fds_to_remap.push_back(
        std::make_pair(output_file_fd, STDERR_FILENO));
  }

#if !BUILDFLAG(IS_FUCHSIA)
  options.new_process_group = true;
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  options.kill_on_parent_death = true;
#endif

#endif  // !BUILDFLAG(IS_WIN)

  result.exit_code = LaunchChildTestProcessWithOptions(
      command_line, options, test_launch_options.flags, result_file,
      timeout_per_test, num_tests, delegate, &result.was_timeout);

  if (redirect_stdio) {
    fflush(output_file.get());

    // Reading the file can sometimes fail when the process was killed
    // mid-flight (e.g. on test suite timeout): https://crbug.com/826408.
    // Attempt to read the output file anyway, but do not crash on failure in
    // this case.
    CHECK(ReadStreamToString(output_file.get(), &result.output_file_contents) ||
          result.exit_code != 0);

    output_file.reset();
#if !BUILDFLAG(IS_WIN)
    // On Windows, the reset() above is enough to delete the file since it was
    // painted for such after being opened. Lesser platforms require an explicit
    // delete now.
    if (!DeleteFile(output_filename))
      LOG(WARNING) << "Failed to delete " << output_filename.AsUTF8Unsafe();
#endif
  }
  result.elapsed_time = TimeTicks::Now() - start_time;
  result.process_num = GetTestLauncherTracer()->RecordProcessExecution(
      start_time, result.elapsed_time);
  return result;
}

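// Splits a gtest-style filter into individual test patterns. For example
// (hypothetical filters), "A.B::C.D" yields {"A.B", "C.D"} when double-colon
// separators are supported, and "A.B:C.D" yields the same via single colons.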
std::vector<std::string> ExtractTestsFromFilter(const std::string& filter,
                                                bool double_colon_supported) {
  std::vector<std::string> tests;
  if (double_colon_supported) {
    tests =
        SplitString(filter, "::", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
  }
  if (tests.size() <= 1) {
    tests =
        SplitString(filter, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
  }
  return tests;
}

// A test runner object to run tests across a number of sequence runners,
// and control running pre tests in sequence.
class TestRunner {
 public:
  explicit TestRunner(TestLauncher* launcher,
                      size_t max_workers = 1u,
                      size_t batch_size = 1u)
      : launcher_(launcher),
        max_workers_(max_workers),
        batch_size_(batch_size) {}

  // Sets |test_names| to be run, with |batch_size| tests per process.
  // Posts a job to run LaunchChildGTestProcess on |max_workers| workers.
  void Run(const std::vector<std::string>& test_names);

 private:
  // Called to check if the next batch has to run on the same
  // sequence task runner and use the same temporary directory.
  static bool IsPreTestBatch(const std::vector<std::string>& test_names) {
    return test_names.size() == 1u &&
           test_names.front().find(kPreTestPrefix) != std::string::npos;
  }

  bool IsSingleThreaded() const { return batch_size_ == 0; }

  void WorkerTask(scoped_refptr<TaskRunner> main_task_runner,
                  base::JobDelegate* delegate);

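  // Example (hypothetical numbers): with 10 tests left, batch_size_ = 2 and
  // max_workers_ = 4, GetMaxConcurrency() below needs ceil(10 / 2) = 5
  // batches and thus reports min(5, 4) = 4 as the desired concurrency.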
  size_t GetMaxConcurrency(size_t worker_count) {
    AutoLock auto_lock(lock_);
    if (IsSingleThreaded()) {
      return tests_to_run_.empty() ? 0 : 1;
    }

    // Round up the division to ensure enough workers for all tests.
    return std::min((tests_to_run_.size() + batch_size_ - 1) / batch_size_,
                    max_workers_);
  }

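  // Note that batches are taken from the back of |tests_to_run_|, which Run()
  // stores in reverse order; e.g. (hypothetical names) if the requested order
  // is {A, B, C}, the vector holds {C, B, A} and a batch of two yields
  // {A, B}.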
  std::vector<std::string> GetNextBatch() EXCLUSIVE_LOCKS_REQUIRED(lock_) {
    size_t batch_size;
    // The single-threaded case runs all tests in one batch.
    if (IsSingleThreaded()) {
      batch_size = tests_to_run_.size();
    }
    // Otherwise, run remaining tests up to |batch_size_|.
    else {
      batch_size = std::min(batch_size_, tests_to_run_.size());
    }
    std::vector<std::string> batch(tests_to_run_.rbegin(),
                                   tests_to_run_.rbegin() + batch_size);
    tests_to_run_.erase(tests_to_run_.end() - batch_size, tests_to_run_.end());
    return batch;
  }


  // Cleans up |task_temp_dir| from a previous task and quits |run_loop| if
  // |done|.
  void CleanupTask(base::ScopedTempDir task_temp_dir, bool done);

  // No-op error function that replaces libxml's default, which writes to
  // stderr.
  static void NullXmlErrorFunc(void* context, const char* message, ...) {}

  ThreadChecker thread_checker_;

  const raw_ptr<TestLauncher> launcher_;
  JobHandle job_handle_;
  // Max number of workers to use.
  const size_t max_workers_;
  // Number of tests per process; 0 is a special case meaning all tests.
  const size_t batch_size_;
  RunLoop run_loop_;
  // Protects members used concurrently by worker tasks.
  base::Lock lock_;
  std::vector<std::string> tests_to_run_ GUARDED_BY(lock_);
  // Set the global libxml error context and function pointer for the lifetime
  // of this test runner.
  ScopedXmlErrorFunc xml_error_func_{nullptr, &NullXmlErrorFunc};

  base::WeakPtrFactory<TestRunner> weak_ptr_factory_{this};
};

void TestRunner::Run(const std::vector<std::string>& test_names) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // No workers, fail immediately.
  CHECK_GT(max_workers_, 0u);
  if (test_names.empty()) {
    return;
  }

  {
    AutoLock auto_lock(lock_);
    tests_to_run_ = test_names;
    // Reverse test order to avoid copying the whole vector when removing tests.
    std::reverse(tests_to_run_.begin(), tests_to_run_.end());
  }

  job_handle_ = base::PostJob(
      FROM_HERE, {TaskPriority::USER_BLOCKING, MayBlock()},
      BindRepeating(&TestRunner::WorkerTask, Unretained(this),
                    SingleThreadTaskRunner::GetCurrentDefault()),
      BindRepeating(&TestRunner::GetMaxConcurrency, Unretained(this)));

  run_loop_.Run();
}

void TestRunner::WorkerTask(scoped_refptr<TaskRunner> main_task_runner,
                            base::JobDelegate* delegate) {
  bool done = false;
  while (!done && !delegate->ShouldYield()) {
    // Create a temporary directory for this task. This directory will hold the
    // flags and results files for the child processes as well as their User
    // Data dir, where appropriate. For platforms that support per-child temp
    // dirs, this directory will also contain one subdirectory per child for
    // that child's process-wide temp dir.
    base::ScopedTempDir task_temp_dir;
    CHECK(task_temp_dir.CreateUniqueTempDirUnderPath(GetTempDirForTesting()));
    int child_index = 0;

    std::vector<std::vector<std::string>> batches;
    {
      AutoLock auto_lock(lock_);
      if (!tests_to_run_.empty()) {
        batches.push_back(GetNextBatch());
        while (IsPreTestBatch(batches.back())) {
          DCHECK(!tests_to_run_.empty());
          batches.push_back(GetNextBatch());
        }
      }
      done = tests_to_run_.empty();
    }
    for (const auto& batch : batches) {
      launcher_->LaunchChildGTestProcess(
          main_task_runner, batch, task_temp_dir.GetPath(),
          CreateChildTempDirIfSupported(task_temp_dir.GetPath(),
                                        child_index++));
    }

    // Cleaning up test results is scheduled on |main_task_runner| because it
    // must happen after all the post-processing steps that
    // LaunchChildGTestProcess scheduled on |main_task_runner|.
    main_task_runner->PostTask(
        FROM_HERE,
        BindOnce(&TestRunner::CleanupTask, weak_ptr_factory_.GetWeakPtr(),
                 std::move(task_temp_dir), done));
  }
}

void TestRunner::CleanupTask(base::ScopedTempDir task_temp_dir, bool done) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Delete the previous temporary directory.
  if (!task_temp_dir.Delete()) {
    // This needs to be non-fatal at least for Windows.
    LOG(WARNING) << "Failed to delete "
                 << task_temp_dir.GetPath().AsUTF8Unsafe();
  }

  if (!done) {
    return;
  }

  if (job_handle_) {
    job_handle_.Cancel();
    run_loop_.QuitWhenIdle();
  }
}

// Returns the number of files and directories in |dir|, or 0 if |dir| is
// empty.
int CountItemsInDirectory(const FilePath& dir) {
  if (dir.empty())
    return 0;
  int items = 0;
  FileEnumerator file_enumerator(
      dir, /*recursive=*/false,
      FileEnumerator::FILES | FileEnumerator::DIRECTORIES);
  for (FilePath name = file_enumerator.Next(); !name.empty();
       name = file_enumerator.Next()) {
    ++items;
  }
  return items;
}

// Truncates a snippet in the middle to the given byte limit. byte_limit should
// be at least 30.
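// Worked example (hypothetical sizes): for a 10000-byte snippet and a
// 300-byte limit, the marker "\n<truncated (10000 bytes)>\n" takes 27 bytes,
// leaving 273 bytes split as the first 136 and the last 137 bytes of the
// snippet.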
std::string TruncateSnippet(const base::StringPiece snippet,
                            size_t byte_limit) {
  if (snippet.length() <= byte_limit) {
    return std::string(snippet);
  }
  std::string truncation_message =
      StringPrintf("\n<truncated (%zu bytes)>\n", snippet.length());
  if (truncation_message.length() > byte_limit) {
    // Fail gracefully.
    return truncation_message;
  }
  size_t remaining_limit = byte_limit - truncation_message.length();
  size_t first_half = remaining_limit / 2;
  return base::StrCat(
      {snippet.substr(0, first_half), truncation_message,
       snippet.substr(snippet.length() - (remaining_limit - first_half))});
}

}  // namespace

const char kGTestBreakOnFailure[] = "gtest_break_on_failure";
const char kGTestFilterFlag[] = "gtest_filter";
const char kGTestFlagfileFlag[] = "gtest_flagfile";
const char kGTestHelpFlag[]   = "gtest_help";
const char kGTestListTestsFlag[] = "gtest_list_tests";
const char kGTestRepeatFlag[] = "gtest_repeat";
const char kGTestRunDisabledTestsFlag[] = "gtest_also_run_disabled_tests";
const char kGTestOutputFlag[] = "gtest_output";
const char kGTestShuffleFlag[] = "gtest_shuffle";
const char kGTestRandomSeedFlag[] = "gtest_random_seed";
const char kIsolatedScriptRunDisabledTestsFlag[] =
    "isolated-script-test-also-run-disabled-tests";
const char kIsolatedScriptTestFilterFlag[] = "isolated-script-test-filter";
const char kIsolatedScriptTestRepeatFlag[] = "isolated-script-test-repeat";

class TestLauncher::TestInfo {
 public:
  TestInfo() = default;
  TestInfo(const TestInfo& other) = default;
  TestInfo(const TestIdentifier& test_id);
  ~TestInfo() = default;

  // Returns test name excluding the DISABLED_ prefix.
  std::string GetDisabledStrippedName() const;

  // Returns full test name.
  std::string GetFullName() const;

  // Returns test name with the PRE_ prefix added, excluding the DISABLED_
  // prefix.
  std::string GetPreName() const;

  // Returns test name excluding the DISABLED_ and PRE_ prefixes.
  std::string GetPrefixStrippedName() const;
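  // Example (hypothetical name): for "MyCase.DISABLED_PRE_Foo",
  // GetDisabledStrippedName() returns "MyCase.PRE_Foo" and
  // GetPrefixStrippedName() returns "MyCase.Foo".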

  const std::string& test_case_name() const { return test_case_name_; }
  const std::string& test_name() const { return test_name_; }
  const std::string& file() const { return file_; }
  int line() const { return line_; }
  bool disabled() const { return disabled_; }
  bool pre_test() const { return pre_test_; }

 private:
  std::string test_case_name_;
  std::string test_name_;
  std::string file_;
  int line_;
  bool disabled_;
  bool pre_test_;
};

TestLauncher::TestInfo::TestInfo(const TestIdentifier& test_id)
    : test_case_name_(test_id.test_case_name),
      test_name_(test_id.test_name),
      file_(test_id.file),
      line_(test_id.line),
      disabled_(false),
      pre_test_(false) {
  disabled_ = GetFullName().find(kDisabledTestPrefix) != std::string::npos;
  pre_test_ = test_name_.find(kPreTestPrefix) != std::string::npos;
}

std::string TestLauncher::TestInfo::GetDisabledStrippedName() const {
  std::string test_name = GetFullName();
  ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
                               std::string());
  return test_name;
}

std::string TestLauncher::TestInfo::GetFullName() const {
  return FormatFullTestName(test_case_name_, test_name_);
}

std::string TestLauncher::TestInfo::GetPreName() const {
  std::string name = test_name_;
  ReplaceSubstringsAfterOffset(&name, 0, kDisabledTestPrefix, std::string());
  std::string case_name = test_case_name_;
  ReplaceSubstringsAfterOffset(&case_name, 0, kDisabledTestPrefix,
                               std::string());
  return FormatFullTestName(case_name, kPreTestPrefix + name);
}

std::string TestLauncher::TestInfo::GetPrefixStrippedName() const {
  std::string test_name = GetDisabledStrippedName();
  ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
  return test_name;
}

TestLauncherDelegate::~TestLauncherDelegate() = default;

bool TestLauncherDelegate::ShouldRunTest(const TestIdentifier& test) {
  return true;
}

TestLauncher::LaunchOptions::LaunchOptions() = default;
TestLauncher::LaunchOptions::LaunchOptions(const LaunchOptions& other) =
    default;
TestLauncher::LaunchOptions::~LaunchOptions() = default;

TestLauncher::TestLauncher(TestLauncherDelegate* launcher_delegate,
                           size_t parallel_jobs,
                           size_t retry_limit)
    : launcher_delegate_(launcher_delegate),
      total_shards_(1),
      shard_index_(0),
      cycles_(1),
      broken_threshold_(0),
      test_started_count_(0),
      test_finished_count_(0),
      test_success_count_(0),
      test_broken_count_(0),
      retries_left_(0),
      retry_limit_(retry_limit),
      output_bytes_limit_(kOutputSnippetBytesLimit),
      force_run_broken_tests_(false),
      watchdog_timer_(FROM_HERE,
                      kOutputTimeout,
                      this,
                      &TestLauncher::OnOutputTimeout),
      parallel_jobs_(parallel_jobs),
      print_test_stdio_(AUTO) {}

TestLauncher::~TestLauncher() {
  if (base::ThreadPoolInstance::Get()) {
    // Clear the ThreadPoolInstance entirely to make it clear to final cleanup
    // phases that they are happening in a single-threaded phase. Assertions in
    // code like ~ScopedFeatureList are unhappy otherwise (crbug.com/1359095).
    base::ThreadPoolInstance::Get()->Shutdown();
    base::ThreadPoolInstance::Get()->JoinForTesting();
    base::ThreadPoolInstance::Set(nullptr);
  }
}

bool TestLauncher::Run(CommandLine* command_line) {
  base::PlatformThread::SetName("TestLauncherMain");

  if (!Init((command_line == nullptr) ? CommandLine::ForCurrentProcess()
                                      : command_line))
    return false;

#if BUILDFLAG(IS_POSIX)
  CHECK_EQ(0, pipe(g_shutdown_pipe));

  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_handler = &ShutdownPipeSignalHandler;

  CHECK_EQ(0, sigaction(SIGINT, &action, nullptr));
  CHECK_EQ(0, sigaction(SIGQUIT, &action, nullptr));
  CHECK_EQ(0, sigaction(SIGTERM, &action, nullptr));

  auto controller = base::FileDescriptorWatcher::WatchReadable(
      g_shutdown_pipe[0],
      base::BindRepeating(&TestLauncher::OnShutdownPipeReadable,
                          Unretained(this)));
#endif  // BUILDFLAG(IS_POSIX)

  // Start the watchdog timer.
  watchdog_timer_.Reset();

  // Indicates whether any test did not succeed.
  bool test_failed = false;
  int iterations = cycles_;
  if (cycles_ > 1 && !stop_on_failure_) {
    // If we don't stop on failure, execute all the repeats in a single
    // iteration, which allows us to parallelize the execution.
    iterations = 1;
    repeats_per_iteration_ = cycles_;
  }
  // Set to false if any iteration fails.
  bool run_result = true;

  while ((iterations > 0 || iterations == -1) &&
         !(stop_on_failure_ && test_failed)) {
    OnTestIterationStart();

    RunTests();
    bool retry_result = RunRetryTests();
    // Signal failure, but continue to run all requested test iterations.
    // With the summary of all iterations at the end this is a good default.
    run_result = run_result && retry_result;

    if (retry_result) {
      fprintf(stdout, "SUCCESS: all tests passed.\n");
      fflush(stdout);
    }

    test_failed = test_success_count_ != test_finished_count_;
    OnTestIterationFinished();
    // Special value "-1" means "repeat indefinitely".
    iterations = (iterations == -1) ? iterations : iterations - 1;
  }

  if (cycles_ != 1)
    results_tracker_.PrintSummaryOfAllIterations();

  MaybeSaveSummaryAsJSON(std::vector<std::string>());

  return run_result;
}


void TestLauncher::LaunchChildGTestProcess(
    scoped_refptr<TaskRunner> task_runner,
    const std::vector<std::string>& test_names,
    const FilePath& task_temp_dir,
    const FilePath& child_temp_dir) {
  FilePath result_file;
  CommandLine cmd_line = launcher_delegate_->GetCommandLine(
      test_names, task_temp_dir, &result_file);

  // Record the exact command line used to launch the child.
  CommandLine new_command_line(PrepareCommandLineForGTest(
      cmd_line, launcher_delegate_->GetWrapper(), retries_left_));
  LaunchOptions options;
  options.flags = launcher_delegate_->GetLaunchOptions();

  ChildProcessResults process_results = DoLaunchChildTestProcess(
      new_command_line, child_temp_dir, result_file,
      launcher_delegate_->GetTimeout(), test_names.size(), options,
      redirect_stdio_, launcher_delegate_);

  // Invoke ProcessTestResults on the original thread, not
  // on a worker pool thread.
  task_runner->PostTask(
      FROM_HERE,
      BindOnce(&TestLauncher::ProcessTestResults, Unretained(this), test_names,
               result_file, process_results.output_file_contents,
               process_results.elapsed_time, process_results.exit_code,
               process_results.was_timeout, process_results.thread_id,
               process_results.process_num,
               CountItemsInDirectory(child_temp_dir)));
}

// Determines which result status will be assigned for missing test results.
TestResult::Status MissingResultStatus(size_t tests_to_run_count,
                                       bool was_timeout,
                                       bool exit_code) {
  // There is more than one test; cannot assess status.
  if (tests_to_run_count > 1u)
    return TestResult::TEST_SKIPPED;

  // There is only one test and no results.
  // Try to determine status by timeout or exit code.
  if (was_timeout)
    return TestResult::TEST_TIMEOUT;
  if (exit_code != 0)
    return TestResult::TEST_FAILURE;

  // It's a strange case when a test executed successfully,
  // but we failed to read the machine-readable report for it.
  return TestResult::TEST_UNKNOWN;
}

// Interprets test results and reports them to the launcher.
void TestLauncher::ProcessTestResults(
    const std::vector<std::string>& test_names,
    const FilePath& result_file,
    const std::string& output,
    TimeDelta elapsed_time,
    int exit_code,
    bool was_timeout,
    PlatformThreadId thread_id,
    int process_num,
    int leaked_items) {
  std::vector<TestResult> test_results;
  bool crashed = false;
  bool have_test_results =
      ProcessGTestOutput(result_file, &test_results, &crashed);

  if (!have_test_results) {
    // We do not have reliable details about test results (parsing test
    // stdout is known to be unreliable).
    LOG(ERROR) << "Failed to get out-of-band test success data, "
                  "dumping full stdio below:\n"
               << output << "\n";
    // This is odd, but sometimes ProcessGTestOutput returns
    // false, yet |test_results| is not empty.
    test_results.clear();
  }

  TestResult::Status missing_result_status =
      MissingResultStatus(test_names.size(), was_timeout, exit_code);

  // TODO(phajdan.jr): Check for duplicates and mismatches between
  // the results we got from XML file and tests we intended to run.
  std::map<std::string, TestResult> results_map;
  for (const auto& i : test_results)
    results_map[i.full_name] = i;

  // Results to be reported back to the test launcher.
  std::vector<TestResult> final_results;

  for (const auto& i : test_names) {
    if (Contains(results_map, i)) {
      TestResult test_result = results_map[i];
      // Fix up the test status: we forcibly kill the child process
      // after the timeout, so from XML results it looks just like
      // a crash.
      if ((was_timeout && test_result.status == TestResult::TEST_CRASH) ||
          // When we run multiple tests in a batch with a timeout applied
          // to the entire batch, it is possible that, with other tests
          // running quickly, some tests take longer than the per-test timeout.
          // For consistent handling of tests independent of order and other
          // factors, mark them as timing out.
          test_result.elapsed_time > launcher_delegate_->GetTimeout()) {
        test_result.status = TestResult::TEST_TIMEOUT;
      }
      final_results.push_back(test_result);
    } else {
      // TODO(phajdan.jr): Explicitly pass the info that the test didn't
      // run for a mysterious reason.
      LOG(ERROR) << "no test result for " << i;
      TestResult test_result;
      test_result.full_name = i;
      test_result.status = missing_result_status;
      final_results.push_back(test_result);
    }
  }

  // TODO(phajdan.jr): Handle the case where processing XML output
  // indicates a crash but none of the test results is marked as crashing.

  bool has_non_success_test = false;
  for (const auto& i : final_results) {
    if (i.status != TestResult::TEST_SUCCESS) {
      has_non_success_test = true;
      break;
    }
  }

  if (!has_non_success_test && exit_code != 0) {
    // This is a somewhat surprising case: all tests are marked as successful,
    // but the exit code was not zero. This can happen e.g. under memory
    // tools that report leaks this way. Mark all tests as a failure on exit,
    // and for more precise info they'd need to be retried serially.
    for (auto& i : final_results)
      i.status = TestResult::TEST_FAILURE_ON_EXIT;
  }

  for (auto& i : final_results) {
    // Fix the output snippet after possible changes to the test result.
    i.output_snippet = GetTestOutputSnippet(i, output);
    // The thread id injected here is that of the worker thread that launched
    // the child test process; it might differ from the thread currently
    // running ProcessTestResults.
    i.thread_id = thread_id;
    i.process_num = process_num;
  }

  if (leaked_items)
    results_tracker_.AddLeakedItems(leaked_items, test_names);

  launcher_delegate_->ProcessTestResults(final_results, elapsed_time);

  for (const auto& result : final_results)
    OnTestFinished(result);
}


void TestLauncher::OnTestFinished(const TestResult& original_result) {
  ++test_finished_count_;

  TestResult result(original_result);

  if (result.output_snippet.length() > output_bytes_limit_) {
    if (result.status == TestResult::TEST_SUCCESS)
      result.status = TestResult::TEST_EXCESSIVE_OUTPUT;

    result.output_snippet =
        TruncateSnippetFocused(result.output_snippet, output_bytes_limit_);
  }

  bool print_snippet = false;
  if (print_test_stdio_ == AUTO) {
    print_snippet = (result.status != TestResult::TEST_SUCCESS);
  } else if (print_test_stdio_ == ALWAYS) {
    print_snippet = true;
  } else if (print_test_stdio_ == NEVER) {
    print_snippet = false;
  }
  if (print_snippet) {
    std::vector<base::StringPiece> snippet_lines =
        SplitStringPiece(result.output_snippet, "\n", base::KEEP_WHITESPACE,
                         base::SPLIT_WANT_ALL);
    if (snippet_lines.size() > kOutputSnippetLinesLimit) {
      size_t truncated_size = snippet_lines.size() - kOutputSnippetLinesLimit;
      snippet_lines.erase(
          snippet_lines.begin(),
          snippet_lines.begin() + truncated_size);
      snippet_lines.insert(snippet_lines.begin(), "<truncated>");
    }
    fprintf(stdout, "%s", base::JoinString(snippet_lines, "\n").c_str());
    fflush(stdout);
  }

1371   if (result.status == TestResult::TEST_SUCCESS) {
1372     ++test_success_count_;
1373   } else {
1374     // Records prefix stripped name to run all dependent tests.
1375     std::string test_name(result.full_name);
1376     ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
1377     ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
1378                                  std::string());
1379     tests_to_retry_.insert(test_name);
1380   }
1381 
1382   // There are no results for this tests,
1383   // most likley due to another test failing in the same batch.
1384   if (result.status != TestResult::TEST_SKIPPED)
1385     results_tracker_.AddTestResult(result);
1386 
1387   // TODO(phajdan.jr): Align counter (padding).
1388   std::string status_line(StringPrintf("[%zu/%zu] %s ", test_finished_count_,
1389                                        test_started_count_,
1390                                        result.full_name.c_str()));
1391   if (result.completed()) {
1392     status_line.append(StringPrintf("(%" PRId64 " ms)",
1393                                     result.elapsed_time.InMilliseconds()));
1394   } else if (result.status == TestResult::TEST_TIMEOUT) {
1395     status_line.append("(TIMED OUT)");
1396   } else if (result.status == TestResult::TEST_CRASH) {
1397     status_line.append("(CRASHED)");
1398   } else if (result.status == TestResult::TEST_SKIPPED) {
1399     status_line.append("(SKIPPED)");
1400   } else if (result.status == TestResult::TEST_UNKNOWN) {
1401     status_line.append("(UNKNOWN)");
1402   } else {
1403     // Fail very loudly so it's not ignored.
1404     CHECK(false) << "Unhandled test result status: " << result.status;
1405   }
1406   fprintf(stdout, "%s\n", status_line.c_str());
1407   fflush(stdout);
1408 
1409   if (CommandLine::ForCurrentProcess()->HasSwitch(
1410           switches::kTestLauncherPrintTimestamps)) {
1411     ::logging::ScopedLoggingSettings scoped_logging_setting;
1412     ::logging::SetLogItems(true, true, true, true);
1413     LOG(INFO) << "Test_finished_timestamp";
1414   }
1415   // We just printed a status line, reset the watchdog timer.
1416   watchdog_timer_.Reset();
1417 
1418   // Do not waste time on timeouts.
1419   if (result.status == TestResult::TEST_TIMEOUT) {
1420     test_broken_count_++;
1421   }
1422   if (!force_run_broken_tests_ && test_broken_count_ >= broken_threshold_) {
1423     fprintf(stdout, "Too many badly broken tests (%zu), exiting now.\n",
1424             test_broken_count_);
1425     fflush(stdout);
1426 
1427 #if BUILDFLAG(IS_POSIX)
1428     KillSpawnedTestProcesses();
1429 #endif  // BUILDFLAG(IS_POSIX)
1430 
1431     MaybeSaveSummaryAsJSON({"BROKEN_TEST_EARLY_EXIT"});
1432 
1433     exit(1);
1434   }
1435 }
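
// Example status lines as printed above (hypothetical test names):
//
//   [23/100] MySuite.FastTest (34 ms)
//   [24/100] MySuite.SlowTest (TIMED OUT)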

// Helper used to parse test filter files. Syntax is documented in
// //testing/buildbot/filters/README.md.
bool LoadFilterFile(const FilePath& file_path,
                    std::vector<std::string>* positive_filter,
                    std::vector<std::string>* negative_filter) {
  std::string file_content;
  if (!ReadFileToString(file_path, &file_content)) {
    LOG(ERROR) << "Failed to read the filter file.";
    return false;
  }

  std::vector<std::string> filter_lines = SplitString(
      file_content, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
  int line_num = 0;
  for (const std::string& filter_line : filter_lines) {
    line_num++;

    size_t hash_pos = filter_line.find('#');

    // If the '#' symbol is not at the beginning of the line and is not
    // preceded by a space, the comment was likely unintentional.
    if (hash_pos != std::string::npos && hash_pos > 0 &&
        filter_line[hash_pos - 1] != ' ') {
      LOG(WARNING) << "Content of line " << line_num << " in " << file_path
                   << " after # is treated as a comment, " << filter_line;
    }

    // Strip comments and whitespace from each line.
    std::string trimmed_line(
        TrimWhitespaceASCII(filter_line.substr(0, hash_pos), TRIM_ALL));

    if (trimmed_line.substr(0, 2) == "//") {
      LOG(ERROR) << "Line " << line_num << " in " << file_path
                 << " starts with //, use # for comments.";
      return false;
    }

    // Skip empty lines.
    if (trimmed_line.empty())
      continue;

    if (trimmed_line[0] == '-')
      negative_filter->push_back(trimmed_line.substr(1));
    else
      positive_filter->push_back(trimmed_line);
  }

  return true;
}
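
// For reference, a minimal filter file accepted by LoadFilterFile might look
// like this (illustrative names; see //testing/buildbot/filters/README.md for
// the authoritative syntax):
//
//   # Run these tests.
//   MySuite.FirstTest
//   MySuite.Wildcard*
//   # A leading '-' adds the pattern to the negative filter instead.
//   -MySuite.ExcludedTest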

bool TestLauncher::Init(CommandLine* command_line) {
  // Initialize sharding. Command line takes precedence over legacy environment
  // variables.
  if (command_line->HasSwitch(switches::kTestLauncherTotalShards) &&
      command_line->HasSwitch(switches::kTestLauncherShardIndex)) {
    if (!StringToInt(
            command_line->GetSwitchValueASCII(
                switches::kTestLauncherTotalShards),
            &total_shards_)) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherTotalShards;
      return false;
    }
    if (!StringToInt(
            command_line->GetSwitchValueASCII(
                switches::kTestLauncherShardIndex),
            &shard_index_)) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherShardIndex;
      return false;
    }
    fprintf(stdout,
            "Using sharding settings from command line. This is shard %d/%d\n",
            shard_index_, total_shards_);
    fflush(stdout);
  } else {
    if (!TakeInt32FromEnvironment(kTestTotalShards, &total_shards_))
      return false;
    if (!TakeInt32FromEnvironment(kTestShardIndex, &shard_index_))
      return false;
    fprintf(stdout,
            "Using sharding settings from environment. This is shard %d/%d\n",
            shard_index_, total_shards_);
    fflush(stdout);
  }
  if (shard_index_ < 0 ||
      total_shards_ < 0 ||
      shard_index_ >= total_shards_) {
    LOG(ERROR) << "Invalid sharding settings: we require 0 <= "
               << kTestShardIndex << " < " << kTestTotalShards
               << ", but you have " << kTestShardIndex << "=" << shard_index_
               << ", " << kTestTotalShards << "=" << total_shards_ << ".\n";
    return false;
  }

  // Make sure we don't pass any sharding-related environment to the child
  // processes. This test launcher implements the sharding completely.
  CHECK(UnsetEnvironmentVariableIfExists("GTEST_TOTAL_SHARDS"));
  CHECK(UnsetEnvironmentVariableIfExists("GTEST_SHARD_INDEX"));
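  // As a sketch (hypothetical binary name), a 4-way sharded run can be
  // requested either via the switches or the legacy environment variables
  // handled above:
  //
  //   ./base_unittests --test-launcher-total-shards=4 \
  //                    --test-launcher-shard-index=0
  //   GTEST_TOTAL_SHARDS=4 GTEST_SHARD_INDEX=0 ./base_unittests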

  if (command_line->HasSwitch(kGTestRepeatFlag) &&
      !StringToInt(command_line->GetSwitchValueASCII(kGTestRepeatFlag),
                   &cycles_)) {
    LOG(ERROR) << "Invalid value for " << kGTestRepeatFlag;
    return false;
  }
  if (command_line->HasSwitch(kIsolatedScriptTestRepeatFlag) &&
      !StringToInt(
          command_line->GetSwitchValueASCII(kIsolatedScriptTestRepeatFlag),
          &cycles_)) {
    LOG(ERROR) << "Invalid value for " << kIsolatedScriptTestRepeatFlag;
    return false;
  }

  if (command_line->HasSwitch(switches::kTestLauncherRetryLimit)) {
    int retry_limit = -1;
    if (!StringToInt(command_line->GetSwitchValueASCII(
                         switches::kTestLauncherRetryLimit), &retry_limit) ||
        retry_limit < 0) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherRetryLimit;
      return false;
    }

    retry_limit_ = retry_limit;
  } else if (command_line->HasSwitch(
                 switches::kIsolatedScriptTestLauncherRetryLimit)) {
    int retry_limit = -1;
    if (!StringToInt(command_line->GetSwitchValueASCII(
                         switches::kIsolatedScriptTestLauncherRetryLimit),
                     &retry_limit) ||
        retry_limit < 0) {
      LOG(ERROR) << "Invalid value for "
                 << switches::kIsolatedScriptTestLauncherRetryLimit;
      return false;
    }

    retry_limit_ = retry_limit;
  } else if (command_line->HasSwitch(kGTestRepeatFlag) ||
             command_line->HasSwitch(kGTestBreakOnFailure)) {
    // If we are repeating tests or waiting for the first test to fail, disable
    // retries.
    retry_limit_ = 0U;
  } else if (!BotModeEnabled(command_line) &&
             (command_line->HasSwitch(kGTestFilterFlag) ||
              command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
    // No retry flag was specified, we are not in bot mode, and a filter flag
    // is present; set retries to zero.
    retry_limit_ = 0U;
  }

  retries_left_ = retry_limit_;
  force_run_broken_tests_ =
      command_line->HasSwitch(switches::kTestLauncherForceRunBrokenTests);

  if (command_line->HasSwitch(switches::kTestLauncherOutputBytesLimit)) {
    int output_bytes_limit = -1;
    if (!StringToInt(command_line->GetSwitchValueASCII(
                         switches::kTestLauncherOutputBytesLimit),
                     &output_bytes_limit) ||
        output_bytes_limit < 0) {
      LOG(ERROR) << "Invalid value for "
                 << switches::kTestLauncherOutputBytesLimit;
      return false;
    }

    output_bytes_limit_ = output_bytes_limit;
  }

  fprintf(stdout, "Using %zu parallel jobs.\n", parallel_jobs_);
  fflush(stdout);

  CreateAndStartThreadPool(parallel_jobs_);

  std::vector<std::string> positive_file_filter;
  std::vector<std::string> positive_gtest_filter;

  if (command_line->HasSwitch(switches::kTestLauncherFilterFile)) {
    auto filter =
        command_line->GetSwitchValueNative(switches::kTestLauncherFilterFile);
    for (auto filter_file :
         SplitStringPiece(filter, FILE_PATH_LITERAL(";"), base::TRIM_WHITESPACE,
                          base::SPLIT_WANT_ALL)) {
      base::FilePath filter_file_path =
          base::MakeAbsoluteFilePath(FilePath(filter_file));
      if (!LoadFilterFile(filter_file_path, &positive_file_filter,
                          &negative_test_filter_))
        return false;
    }
  }

  // If kGTestRunDisabledTestsFlag is set, force running all negative
  // tests in testing/buildbot/filters.
  if (command_line->HasSwitch(kGTestRunDisabledTestsFlag)) {
    negative_test_filter_.clear();
  }

  // Split --gtest_filter at '-', if there is one, to separate into
  // positive filter and negative filter portions.
  bool double_colon_supported = !command_line->HasSwitch(kGTestFilterFlag);
  std::string filter = command_line->GetSwitchValueASCII(
      double_colon_supported ? kIsolatedScriptTestFilterFlag
                             : kGTestFilterFlag);
  size_t dash_pos = filter.find('-');
  if (dash_pos == std::string::npos) {
    positive_gtest_filter =
        ExtractTestsFromFilter(filter, double_colon_supported);
  } else {
    // Everything up to the dash.
    positive_gtest_filter = ExtractTestsFromFilter(filter.substr(0, dash_pos),
                                                   double_colon_supported);

    // Everything after the dash.
    for (std::string pattern : ExtractTestsFromFilter(
             filter.substr(dash_pos + 1), double_colon_supported)) {
      negative_test_filter_.push_back(pattern);
    }
  }

  skip_disabled_tests_ =
      !command_line->HasSwitch(kGTestRunDisabledTestsFlag) &&
      !command_line->HasSwitch(kIsolatedScriptRunDisabledTestsFlag);

  if (!InitTests())
    return false;

  if (!ShuffleTests(command_line))
    return false;

  if (!ProcessAndValidateTests())
    return false;

  if (command_line->HasSwitch(switches::kTestLauncherPrintTestStdio)) {
    std::string print_test_stdio = command_line->GetSwitchValueASCII(
        switches::kTestLauncherPrintTestStdio);
    if (print_test_stdio == "auto") {
      print_test_stdio_ = AUTO;
    } else if (print_test_stdio == "always") {
      print_test_stdio_ = ALWAYS;
    } else if (print_test_stdio == "never") {
      print_test_stdio_ = NEVER;
    } else {
      LOG(WARNING) << "Invalid value of "
                   << switches::kTestLauncherPrintTestStdio << ": "
                   << print_test_stdio;
      return false;
    }
  }

  stop_on_failure_ = command_line->HasSwitch(kGTestBreakOnFailure);

  if (command_line->HasSwitch(switches::kTestLauncherSummaryOutput)) {
    summary_path_ = FilePath(
        command_line->GetSwitchValuePath(switches::kTestLauncherSummaryOutput));
  }
  if (command_line->HasSwitch(switches::kTestLauncherTrace)) {
    trace_path_ = FilePath(
        command_line->GetSwitchValuePath(switches::kTestLauncherTrace));
  }

  // When running in parallel mode we need to redirect stdio to avoid mixed-up
  // output. We also always redirect on the bots to get the test output into
  // the JSON summary.
  redirect_stdio_ = (parallel_jobs_ > 1) || BotModeEnabled(command_line);

  CombinePositiveTestFilters(std::move(positive_gtest_filter),
                             std::move(positive_file_filter));

  if (!results_tracker_.Init(*command_line)) {
    LOG(ERROR) << "Failed to initialize test results tracker.";
    return false;
  }

#if defined(NDEBUG)
  results_tracker_.AddGlobalTag("MODE_RELEASE");
#else
  results_tracker_.AddGlobalTag("MODE_DEBUG");
#endif

  // Operating systems (sorted alphabetically).
  // Note that they can deliberately overlap, e.g. OS_LINUX is a subset
  // of OS_POSIX.
#if BUILDFLAG(IS_ANDROID)
  results_tracker_.AddGlobalTag("OS_ANDROID");
#endif

#if BUILDFLAG(IS_APPLE)
  results_tracker_.AddGlobalTag("OS_APPLE");
#endif

#if BUILDFLAG(IS_BSD)
  results_tracker_.AddGlobalTag("OS_BSD");
#endif

#if BUILDFLAG(IS_FREEBSD)
  results_tracker_.AddGlobalTag("OS_FREEBSD");
#endif

#if BUILDFLAG(IS_FUCHSIA)
  results_tracker_.AddGlobalTag("OS_FUCHSIA");
#endif

#if BUILDFLAG(IS_IOS)
  results_tracker_.AddGlobalTag("OS_IOS");
#endif

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  results_tracker_.AddGlobalTag("OS_LINUX");
#endif

#if BUILDFLAG(IS_CHROMEOS_ASH)
  results_tracker_.AddGlobalTag("OS_CHROMEOS");
#endif

#if BUILDFLAG(IS_MAC)
  results_tracker_.AddGlobalTag("OS_MAC");
#endif

#if BUILDFLAG(IS_NACL)
  results_tracker_.AddGlobalTag("OS_NACL");
#endif

#if BUILDFLAG(IS_OPENBSD)
  results_tracker_.AddGlobalTag("OS_OPENBSD");
#endif

#if BUILDFLAG(IS_POSIX)
  results_tracker_.AddGlobalTag("OS_POSIX");
#endif

#if BUILDFLAG(IS_SOLARIS)
  results_tracker_.AddGlobalTag("OS_SOLARIS");
#endif

#if BUILDFLAG(IS_WIN)
  results_tracker_.AddGlobalTag("OS_WIN");
#endif

  // CPU-related tags.
#if defined(ARCH_CPU_32_BITS)
  results_tracker_.AddGlobalTag("CPU_32_BITS");
#endif

#if defined(ARCH_CPU_64_BITS)
  results_tracker_.AddGlobalTag("CPU_64_BITS");
#endif

  return true;
}

bool TestLauncher::InitTests() {
  std::vector<TestIdentifier> tests;
  if (!launcher_delegate_->GetTests(&tests)) {
    LOG(ERROR) << "Failed to get list of tests.";
    return false;
  }
  std::vector<std::string> uninstantiated_tests;
  for (const TestIdentifier& test_id : tests) {
    TestInfo test_info(test_id);
    if (test_id.test_case_name == "GoogleTestVerification") {
      // GoogleTestVerification is used by googletest to detect tests that are
      // parameterized but not instantiated.
      uninstantiated_tests.push_back(test_id.test_name);
      continue;
    }
    // TODO(isamsonov): crbug.com/1004417 remove when windows builders
    // stop flaking on MANUAL_ tests.
    if (launcher_delegate_->ShouldRunTest(test_id))
      tests_.push_back(test_info);
  }
  if (!uninstantiated_tests.empty()) {
    LOG(ERROR) << "Found uninstantiated parameterized tests. These test suites "
                  "will not run:";
    for (const std::string& name : uninstantiated_tests)
      LOG(ERROR) << "  " << name;
    LOG(ERROR) << "Please use INSTANTIATE_TEST_SUITE_P to instantiate the "
                  "tests, or GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST if "
                  "the parameter list can be intentionally empty. See "
                  "//third_party/googletest/src/docs/advanced.md";
    return false;
  }
  return true;
}

bool TestLauncher::ShuffleTests(CommandLine* command_line) {
  if (command_line->HasSwitch(kGTestShuffleFlag)) {
    uint32_t shuffle_seed;
    if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
      const std::string custom_seed_str =
          command_line->GetSwitchValueASCII(kGTestRandomSeedFlag);
      uint32_t custom_seed = 0;
      if (!StringToUint(custom_seed_str, &custom_seed)) {
        LOG(ERROR) << "Unable to parse seed \"" << custom_seed_str << "\".";
        return false;
      }
      if (custom_seed >= kRandomSeedUpperBound) {
        LOG(ERROR) << "Seed " << custom_seed << " outside of expected range "
                   << "[0, " << kRandomSeedUpperBound << ")";
        return false;
      }
      shuffle_seed = custom_seed;
    } else {
      // uniform_int_distribution is inclusive of both bounds, so cap the
      // generated seed at kRandomSeedUpperBound - 1 to stay within the
      // [0, kRandomSeedUpperBound) range enforced above.
      std::uniform_int_distribution<uint32_t> dist(0,
                                                   kRandomSeedUpperBound - 1);
      std::random_device random_dev;
      shuffle_seed = dist(random_dev);
    }

    std::mt19937 randomizer;
    randomizer.seed(shuffle_seed);
    ranges::shuffle(tests_, randomizer);

    fprintf(stdout, "Randomizing with seed %u\n", shuffle_seed);
    fflush(stdout);
  } else if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
    LOG(ERROR) << kGTestRandomSeedFlag << " requires " << kGTestShuffleFlag;
    return false;
  }
  return true;
}
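
// A shuffled but reproducible order can be requested as, e.g. (hypothetical
// binary name):
//
//   ./base_unittests --gtest_shuffle --gtest_random_seed=12345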

bool TestLauncher::ProcessAndValidateTests() {
  bool result = true;
  std::unordered_set<std::string> disabled_tests;
  std::unordered_map<std::string, TestInfo> pre_tests;

  // Find disabled and PRE_ tests.
  for (const TestInfo& test_info : tests_) {
    std::string test_name = test_info.GetFullName();
    results_tracker_.AddTest(test_name);
    if (test_info.disabled()) {
      disabled_tests.insert(test_info.GetDisabledStrippedName());
      results_tracker_.AddDisabledTest(test_name);
    }
    if (test_info.pre_test())
      pre_tests[test_info.GetDisabledStrippedName()] = test_info;
  }

  std::vector<TestInfo> tests_to_run;
  for (const TestInfo& test_info : tests_) {
    std::string test_name = test_info.GetFullName();
    // If any test has a matching disabled test, fail and log for audit.
    if (base::Contains(disabled_tests, test_name)) {
      LOG(ERROR) << test_name << " duplicated by a DISABLED_ test";
      result = false;
    }

    // Skip PRE_ tests here; they are appended once their final test is found.
    if (test_info.pre_test())
      continue;

    std::vector<TestInfo> test_sequence;
    test_sequence.push_back(test_info);
    // Chain the PRE_ tests so that they run, in order, before the final test.
    while (base::Contains(pre_tests, test_sequence.back().GetPreName())) {
      test_sequence.push_back(pre_tests[test_sequence.back().GetPreName()]);
      pre_tests.erase(test_sequence.back().GetDisabledStrippedName());
    }
    // Skip disabled tests unless explicitly requested.
    if (!test_info.disabled() || !skip_disabled_tests_)
      tests_to_run.insert(tests_to_run.end(), test_sequence.rbegin(),
                          test_sequence.rend());
  }
  tests_ = std::move(tests_to_run);

  // If any tests remain in the |pre_tests| map, fail and log for audit.
  for (const auto& i : pre_tests) {
    LOG(ERROR) << i.first << " is an orphaned PRE_ test";
    result = false;
  }
  return result;
}
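
// As a sketch with hypothetical names: given Suite.PRE_PRE_Foo,
// Suite.PRE_Foo and Suite.Foo, the loop above emits them in exactly that
// order, so each PRE_ stage runs before the test that depends on it.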

void TestLauncher::CreateAndStartThreadPool(size_t num_parallel_jobs) {
  base::ThreadPoolInstance::Create("TestLauncher");
  base::ThreadPoolInstance::Get()->Start({num_parallel_jobs});
}

void TestLauncher::CombinePositiveTestFilters(
    std::vector<std::string> filter_a,
    std::vector<std::string> filter_b) {
  has_at_least_one_positive_filter_ = !filter_a.empty() || !filter_b.empty();
  if (!has_at_least_one_positive_filter_) {
    return;
  }
  // If two positive filters are present, only run tests that match a pattern
  // in both filters.
  if (!filter_a.empty() && !filter_b.empty()) {
    for (const auto& i : tests_) {
      std::string test_name = i.GetFullName();
      bool found_a = false;
      bool found_b = false;
      for (const auto& k : filter_a) {
        found_a = found_a || MatchPattern(test_name, k);
      }
      for (const auto& k : filter_b) {
        found_b = found_b || MatchPattern(test_name, k);
      }
      if (found_a && found_b) {
        positive_test_filter_.push_back(test_name);
      }
    }
  } else if (!filter_a.empty()) {
    positive_test_filter_ = std::move(filter_a);
  } else {
    positive_test_filter_ = std::move(filter_b);
  }
}
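
// For example (hypothetical patterns): with --gtest_filter=MySuite.* and a
// filter file containing MySuite.FastTest, only MySuite.FastTest matches
// both positive filters, so it is the only test kept.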

std::vector<std::string> TestLauncher::CollectTests() {
  std::vector<std::string> test_names;
  // To support RTS (regression test selection), which may produce 100,000 or
  // more exact gtest filters, we first split the filter into an exact part
  // and a wildcard part, so that the exact filters can be matched faster.
  std::vector<StringPiece> positive_wildcards_filter;
  std::unordered_set<StringPiece, StringPieceHash> positive_exact_filter;
  positive_exact_filter.reserve(positive_test_filter_.size());
  for (const std::string& filter : positive_test_filter_) {
    if (filter.find('*') != std::string::npos) {
      positive_wildcards_filter.push_back(filter);
    } else {
      positive_exact_filter.insert(filter);
    }
  }

  std::vector<StringPiece> negative_wildcards_filter;
  std::unordered_set<StringPiece, StringPieceHash> negative_exact_filter;
  negative_exact_filter.reserve(negative_test_filter_.size());
  for (const std::string& filter : negative_test_filter_) {
    if (filter.find('*') != std::string::npos) {
      negative_wildcards_filter.push_back(filter);
    } else {
      negative_exact_filter.insert(filter);
    }
  }
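  // E.g. (hypothetical patterns): "MySuite.FastTest" lands in an exact-filter
  // set (hash lookup), while "MySuite.*" lands in a wildcard list that must
  // be scanned with MatchPattern for every test.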

  for (const TestInfo& test_info : tests_) {
    std::string test_name = test_info.GetFullName();

    std::string prefix_stripped_name = test_info.GetPrefixStrippedName();

    // Skip the test that doesn't match the filter (if given).
    if (has_at_least_one_positive_filter_) {
      bool found = positive_exact_filter.find(test_name) !=
                       positive_exact_filter.end() ||
                   positive_exact_filter.find(prefix_stripped_name) !=
                       positive_exact_filter.end();
      if (!found) {
        for (const StringPiece& filter : positive_wildcards_filter) {
          if (MatchPattern(test_name, filter) ||
              MatchPattern(prefix_stripped_name, filter)) {
            found = true;
            break;
          }
        }
      }

      if (!found)
        continue;
    }

    if (negative_exact_filter.find(test_name) != negative_exact_filter.end() ||
        negative_exact_filter.find(prefix_stripped_name) !=
            negative_exact_filter.end()) {
      continue;
    }

    bool excluded = false;
    for (const StringPiece& filter : negative_wildcards_filter) {
      if (MatchPattern(test_name, filter) ||
          MatchPattern(prefix_stripped_name, filter)) {
        excluded = true;
        break;
      }
    }
    if (excluded)
      continue;

    // Tests with the name XYZ will cause tests with the name PRE_XYZ to run. We
    // should bucket all of these tests together.
    if (PersistentHash(prefix_stripped_name) % total_shards_ !=
        static_cast<uint32_t>(shard_index_)) {
      continue;
    }
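    // E.g. with total_shards_ == 4, a prefix-stripped name hashing to 10 is
    // assigned to shard 10 % 4 == 2; Suite.Foo and Suite.PRE_Foo share a
    // shard because both hash the same stripped name.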

    // Report test locations after applying all filters, so that we report test
    // locations only for those tests that were run as part of this shard.
    results_tracker_.AddTestLocation(test_name, test_info.file(),
                                     test_info.line());

    if (!test_info.pre_test()) {
      // Only a subset of tests that are run require placeholders -- namely,
      // those that will output results. Note that the results for PRE_XYZ will
      // be merged into XYZ's results if the former fails, so we don't need a
      // placeholder for it.
      results_tracker_.AddTestPlaceholder(test_name);
    }

    test_names.push_back(test_name);
  }

  return test_names;
}

void TestLauncher::RunTests() {
  std::vector<std::string> original_test_names = CollectTests();

  std::vector<std::string> test_names;
  for (int i = 0; i < repeats_per_iteration_; ++i) {
    test_names.insert(test_names.end(), original_test_names.begin(),
                      original_test_names.end());
  }

  broken_threshold_ = std::max(static_cast<size_t>(20), tests_.size() / 10);
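  // I.e. give up once at least 10% of the tests (and no fewer than 20) are
  // counted as broken; e.g. a 500-test suite tolerates up to 49 timeouts.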

  test_started_count_ = test_names.size();

  // If there are no matching tests, warn and notify of any matches against
  // *<filter>*.
  if (test_started_count_ == 0) {
    PrintFuzzyMatchingTestNames();
    fprintf(stdout, "WARNING: No matching tests to run.\n");
    fflush(stdout);
  }

  // Save an early test summary in case the launcher crashes or gets killed.
  results_tracker_.GeneratePlaceholderIteration();
  MaybeSaveSummaryAsJSON({"EARLY_SUMMARY"});

  // If we are repeating the tests, set the batch size to 1 so that it does
  // not interfere with the repeats (unit tests use a filter per batch and
  // cannot run the same test twice in the same batch).
  size_t batch_size =
      repeats_per_iteration_ > 1 ? 1U : launcher_delegate_->GetBatchSize();

  TestRunner test_runner(this, parallel_jobs_, batch_size);
  test_runner.Run(test_names);
}

void TestLauncher::PrintFuzzyMatchingTestNames() {
  for (auto filter : positive_test_filter_) {
    if (filter.empty())
      continue;
    std::string almost_filter;
    if (filter.front() != '*')
      almost_filter += '*';
    almost_filter += filter;
    if (filter.back() != '*')
      almost_filter += '*';

    for (const TestInfo& test_info : tests_) {
      std::string test_name = test_info.GetFullName();
      std::string prefix_stripped_name = test_info.GetPrefixStrippedName();
      if (MatchPattern(test_name, almost_filter) ||
          MatchPattern(prefix_stripped_name, almost_filter)) {
        fprintf(stdout, "Filter \"%s\" would have matched: %s\n",
                almost_filter.c_str(), test_name.c_str());
        fflush(stdout);
      }
    }
  }
}

bool TestLauncher::RunRetryTests() {
  while (!tests_to_retry_.empty() && retries_left_ > 0) {
    // Retry all tests that depend on a failing test.
    std::vector<std::string> test_names;
    for (const TestInfo& test_info : tests_) {
      if (base::Contains(tests_to_retry_, test_info.GetPrefixStrippedName()))
        test_names.push_back(test_info.GetFullName());
    }
    tests_to_retry_.clear();

    size_t retry_started_count = test_names.size();
    test_started_count_ += retry_started_count;

    // Only start a test runner if there are any tests to run.
    if (retry_started_count == 0)
      return false;

    fprintf(stdout, "Retrying %zu test%s (retry #%zu)\n", retry_started_count,
            retry_started_count > 1 ? "s" : "", retry_limit_ - retries_left_);
    fflush(stdout);

    --retries_left_;
    TestRunner test_runner(this);
    test_runner.Run(test_names);
  }
  return tests_to_retry_.empty();
}

void TestLauncher::OnTestIterationStart() {
  test_started_count_ = 0;
  test_finished_count_ = 0;
  test_success_count_ = 0;
  test_broken_count_ = 0;
  tests_to_retry_.clear();
  results_tracker_.OnTestIterationStarting();
}

#if BUILDFLAG(IS_POSIX)
// I/O watcher for the reading end of the self-pipe above.
// Terminates any launched child processes and exits the process.
void TestLauncher::OnShutdownPipeReadable() {
  fprintf(stdout, "\nCaught signal. Killing spawned test processes...\n");
  fflush(stdout);

  KillSpawnedTestProcesses();

  MaybeSaveSummaryAsJSON({"CAUGHT_TERMINATION_SIGNAL"});

  // The signal would normally kill the process, so exit now.
  _exit(1);
}
#endif  // BUILDFLAG(IS_POSIX)

void TestLauncher::MaybeSaveSummaryAsJSON(
    const std::vector<std::string>& additional_tags) {
  if (!summary_path_.empty()) {
    if (!results_tracker_.SaveSummaryAsJSON(summary_path_, additional_tags)) {
      LOG(ERROR) << "Failed to save test launcher output summary.";
    }
  }
  if (!trace_path_.empty()) {
    if (!GetTestLauncherTracer()->Dump(trace_path_)) {
      LOG(ERROR) << "Failed to save test launcher trace.";
    }
  }
}

void TestLauncher::OnTestIterationFinished() {
  TestResultsTracker::TestStatusMap tests_by_status(
      results_tracker_.GetTestStatusMapForCurrentIteration());
  if (!tests_by_status[TestResult::TEST_UNKNOWN].empty())
    results_tracker_.AddGlobalTag(kUnreliableResultsTag);

  results_tracker_.PrintSummaryOfCurrentIteration();
}

void TestLauncher::OnOutputTimeout() {
  DCHECK(thread_checker_.CalledOnValidThread());

  AutoLock lock(*GetLiveProcessesLock());

  fprintf(stdout, "Still waiting for the following processes to finish:\n");

  for (const auto& pair : *GetLiveProcesses()) {
#if BUILDFLAG(IS_WIN)
    fwprintf(stdout, L"\t%s\n", pair.second.GetCommandLineString().c_str());
#else
    fprintf(stdout, "\t%s\n", pair.second.GetCommandLineString().c_str());
#endif
  }

  fflush(stdout);

  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kTestLauncherPrintTimestamps)) {
    ::logging::ScopedLoggingSettings scoped_logging_setting;
    ::logging::SetLogItems(true, true, true, true);
    LOG(INFO) << "Waiting_timestamp";
  }
  // Arm the timer again - otherwise it would fire only once.
  watchdog_timer_.Reset();
}

size_t NumParallelJobs(unsigned int cores_per_job) {
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  if (command_line->HasSwitch(switches::kTestLauncherJobs)) {
    // If the number of test launcher jobs was specified, return that number.
    size_t jobs = 0U;

    if (!StringToSizeT(
            command_line->GetSwitchValueASCII(switches::kTestLauncherJobs),
            &jobs) ||
        !jobs) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherJobs;
      return 0U;
    }
    return jobs;
  }
  if (!BotModeEnabled(command_line) &&
      (command_line->HasSwitch(kGTestFilterFlag) ||
       command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
    // Do not run jobs in parallel by default if we are running a subset of
    // the tests and if bot mode is off.
    return 1U;
  }

#if BUILDFLAG(IS_WIN)
  // Use processors in all groups (Windows splits more than 64 logical
  // processors into groups).
  size_t cores = base::checked_cast<size_t>(
      ::GetActiveProcessorCount(ALL_PROCESSOR_GROUPS));
#else
  size_t cores = base::checked_cast<size_t>(SysInfo::NumberOfProcessors());
#endif
#if BUILDFLAG(IS_IOS) && TARGET_OS_SIMULATOR
  // If we are targeting the simulator, increase the number of jobs to 2x the
  // number of cores. This is necessary because the startup of each process is
  // slow, so using 2x empirically approaches the total machine utilization.
  cores *= 2;
#endif
  return std::max(size_t(1), cores / cores_per_job);
}
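
// E.g. on an 8-core machine with cores_per_job == 2 this yields 4 parallel
// jobs; passing --test-launcher-jobs=N on the command line overrides the
// computation entirely.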

std::string GetTestOutputSnippet(const TestResult& result,
                                 const std::string& full_output) {
  size_t run_pos = full_output.find(std::string("[ RUN      ] ") +
                                    result.full_name);
  if (run_pos == std::string::npos)
    return std::string();

  size_t end_pos = full_output.find(std::string("[  FAILED  ] ") +
                                    result.full_name,
                                    run_pos);
  // Only clip the snippet to the "OK" message if the test really
  // succeeded or was skipped. It still might have e.g. crashed
  // after printing it.
  if (end_pos == std::string::npos) {
    if (result.status == TestResult::TEST_SUCCESS) {
      end_pos = full_output.find(std::string("[       OK ] ") +
                                 result.full_name,
                                 run_pos);

      // Also handle SKIPPED next to SUCCESS because the GTest XML output
      // doesn't distinguish between SKIPPED and SUCCESS.
      if (end_pos == std::string::npos)
        end_pos = full_output.find(
            std::string("[  SKIPPED ] ") + result.full_name, run_pos);
    } else {
      // If the test is not successful, include all output until the
      // subsequent test.
      end_pos = full_output.find(std::string("[ RUN      ]"), run_pos + 1);
      if (end_pos != std::string::npos)
        end_pos--;
    }
  }
  if (end_pos != std::string::npos) {
    size_t newline_pos = full_output.find("\n", end_pos);
    if (newline_pos != std::string::npos)
      end_pos = newline_pos + 1;
  }

  std::string snippet(full_output.substr(run_pos));
  if (end_pos != std::string::npos)
    snippet = full_output.substr(run_pos, end_pos - run_pos);

  return snippet;
}
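
// For a passing test, the returned snippet spans e.g. (hypothetical name):
//
//   [ RUN      ] MySuite.FastTest
//   <anything the test wrote to stdout>
//   [       OK ] MySuite.FastTest (12 ms)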

std::string TruncateSnippetFocused(const base::StringPiece snippet,
                                   size_t byte_limit) {
  // Find the start of anything that looks like a fatal log message.
  // We want to preferentially preserve these from truncation as we
  // run extraction of fatal test errors from snippets in result_adapter
  // to populate failure reasons in ResultDB. It is also convenient for
  // the user to see them.
  // Refer to LogMessage::Init in base/logging[_platform].cc for patterns.
  size_t fatal_message_pos =
      std::min(snippet.find("FATAL:"), snippet.find("FATAL "));

  size_t fatal_message_start = 0;
  size_t fatal_message_end = 0;
  if (fatal_message_pos != std::string::npos) {
    // Find the line-endings before and after the fatal message.
    size_t start_pos = snippet.rfind("\n", fatal_message_pos);
    if (start_pos != std::string::npos) {
      fatal_message_start = start_pos;
    }
    size_t end_pos = snippet.find("\n", fatal_message_pos);
    if (end_pos != std::string::npos) {
      // Include the new-line character.
      fatal_message_end = end_pos + 1;
    } else {
      fatal_message_end = snippet.length();
    }
  }
  // Limit the fatal message length to half the snippet byte quota. This
  // ensures we have space for some context at the beginning and end of the
  // snippet.
  fatal_message_end =
      std::min(fatal_message_end, fatal_message_start + (byte_limit / 2));

  // Distribute the remaining bytes between the start and end of the snippet.
  // The split is either even, or, if one side is small enough to be displayed
  // without truncation, it is displayed in full and the other side gets the
  // remaining bytes.
  size_t remaining_bytes =
      byte_limit - (fatal_message_end - fatal_message_start);
  size_t start_split_bytes;
  size_t end_split_bytes;
  if (fatal_message_start < remaining_bytes / 2) {
    start_split_bytes = fatal_message_start;
    end_split_bytes = remaining_bytes - fatal_message_start;
  } else if ((snippet.length() - fatal_message_end) < remaining_bytes / 2) {
    start_split_bytes =
        remaining_bytes - (snippet.length() - fatal_message_end);
    end_split_bytes = (snippet.length() - fatal_message_end);
  } else {
    start_split_bytes = remaining_bytes / 2;
    end_split_bytes = remaining_bytes - start_split_bytes;
  }
  return base::StrCat(
      {TruncateSnippet(snippet.substr(0, fatal_message_start),
                       start_split_bytes),
       snippet.substr(fatal_message_start,
                      fatal_message_end - fatal_message_start),
       TruncateSnippet(snippet.substr(fatal_message_end), end_split_bytes)});
}
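
// Worked example (hypothetical sizes): with byte_limit == 300 and a
// 10,000-byte snippet whose only FATAL line occupies bytes [5000, 5100),
// the 100-byte fatal line fits under the 150-byte cap, leaving 200 bytes to
// split evenly: the result keeps ~100 bytes of leading context, the fatal
// line, and ~100 bytes of trailing context.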

}  // namespace base