#!/usr/bin/env python3
# Copyright 2016 The PDFium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
from dataclasses import dataclass, field
from datetime import timedelta
from io import BytesIO
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import time

import common
import pdfium_root
import pngdiffer
from skia_gold import skia_gold
import suppressor

pdfium_root.add_source_directory_to_import_path(os.path.join('build', 'util'))
from lib.results import result_sink, result_types


# Arbitrary timestamp, expressed in seconds since the epoch, used to make sure
# that tests that depend on the current time are stable. Happens to be the
# timestamp of the first commit to the repo, 2014/5/9 17:48:50.
TEST_SEED_TIME = "1399672130"

# List of test types that should run text tests instead of pixel tests.
TEXT_TESTS = ['javascript']

# Timeout (in seconds) for individual test commands.
# TODO(crbug.com/pdfium/1967): array_buffer.in is slow under MSan, so need a
# very generous 5 minute timeout for now.
TEST_TIMEOUT = timedelta(minutes=5).total_seconds()


class TestRunner:
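  """Runs the tests for a single test directory.

  The directory name doubles as the test type (e.g. 'corpus', 'javascript',
  or 'pixel') and determines whether text or pixel comparisons are performed.
  """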

  def __init__(self, dirname):
    # Currently the only used directories are corpus, javascript, and pixel,
    # which all correspond directly to the type for the test being run. In the
    # future if there are tests that don't have this clean correspondence, then
    # an argument for the type will need to be added.
    self.per_process_config = _PerProcessConfig(
        test_dir=dirname, test_type=dirname)

  @property
  def options(self):
    return self.per_process_config.options

  def IsSkiaGoldEnabled(self):
    return (self.options.run_skia_gold and
            not self.per_process_config.test_type in TEXT_TESTS)

  def IsExecutionSuppressed(self, input_path):
    return self.per_process_state.test_suppressor.IsExecutionSuppressed(
        input_path)

  def IsResultSuppressed(self, input_filename):
    return self.per_process_state.test_suppressor.IsResultSuppressed(
        input_filename)

  def HandleResult(self, test_case, test_result):
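    """Handles the result of a single test case.

    Applies result suppressions, tallies the outcome, logs it, and reports it
    to ResultDB (including per-page image diffs) when a ResultSink client is
    available.
    """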
    input_filename = os.path.basename(test_case.input_path)

    test_result.status = self._SuppressStatus(input_filename,
                                              test_result.status)
    if test_result.status == result_types.UNKNOWN:
      self.result_suppressed_cases.append(input_filename)
      self.surprises.append(test_case.input_path)
    elif test_result.status == result_types.SKIP:
      self.result_suppressed_cases.append(input_filename)
    elif not test_result.IsPass():
      self.failures.append(test_case.input_path)

    for artifact in test_result.image_artifacts:
      if artifact.skia_gold_status == result_types.PASS:
        if self.IsResultSuppressed(artifact.image_path):
          self.skia_gold_unexpected_successes.append(artifact.GetSkiaGoldId())
        else:
          self.skia_gold_successes.append(artifact.GetSkiaGoldId())
      elif artifact.skia_gold_status == result_types.FAIL:
        self.skia_gold_failures.append(artifact.GetSkiaGoldId())

    # Log test result.
    print(f'{test_result.status}: {test_result.test_id}')
    if not test_result.IsPass():
      if test_result.reason:
        print(f'Failure reason: {test_result.reason}')
      if test_result.log:
        decoded_log = bytes.decode(test_result.log, errors='backslashreplace')
        print(f'Test output:\n{decoded_log}')
      for artifact in test_result.image_artifacts:
        if artifact.skia_gold_status == result_types.FAIL:
          print(f'Failed Skia Gold: {artifact.image_path}')
        if artifact.image_diff:
          print(f'Failed image diff: {artifact.image_diff.reason}')

    # Report test result to ResultDB.
    if self.resultdb:
      only_artifacts = None
      only_failure_reason = test_result.reason
      if len(test_result.image_artifacts) == 1:
        only = test_result.image_artifacts[0]
        only_artifacts = only.GetDiffArtifacts()
        if only.GetDiffReason():
          only_failure_reason += f': {only.GetDiffReason()}'
      self.resultdb.Post(
          test_id=test_result.test_id,
          status=test_result.status,
          duration=test_result.duration_milliseconds,
          test_log=test_result.log,
          test_file=None,
          artifacts=only_artifacts,
          failure_reason=only_failure_reason)

      # Milo only supports a single diff per test, so if we have multiple pages,
      # report each page as its own "test."
      if len(test_result.image_artifacts) > 1:
        for page, artifact in enumerate(test_result.image_artifacts):
          self.resultdb.Post(
              test_id=f'{test_result.test_id}/{page}',
              status=self._SuppressArtifactStatus(test_result,
                                                  artifact.GetDiffStatus()),
              duration=None,
              test_log=None,
              test_file=None,
              artifacts=artifact.GetDiffArtifacts(),
              failure_reason=artifact.GetDiffReason())

  def _SuppressStatus(self, input_filename, status):
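    """Returns the status to report, taking result suppressions into account.

    For suppressed results, unexpected passes map to UNKNOWN and everything
    else maps to SKIP; unsuppressed results are returned unchanged.
    """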
    if not self.IsResultSuppressed(input_filename):
      return status

    if status == result_types.PASS:
      # There isn't an actual status for succeeded-but-ignored, so use the
      # "abort" status to differentiate this from failed-but-ignored.
      #
      # Note that this appears as a preliminary failure in Gerrit.
      return result_types.UNKNOWN

    # There isn't an actual status for failed-but-ignored, so use the "skip"
    # status to differentiate this from succeeded-but-ignored.
    return result_types.SKIP

  def _SuppressArtifactStatus(self, test_result, status):
    if status != result_types.FAIL:
      return status

    if test_result.status != result_types.SKIP:
      return status

    return result_types.SKIP

  def Run(self):
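    """Parses options, collects test cases, runs them, and reports results.

    Returns:
      0 on success; 1 on setup failure or test failures (unless
      --ignore_errors downgrades test failures).
    """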
    # Running a test defines a number of attributes on the fly.
    # pylint: disable=attribute-defined-outside-init

    relative_test_dir = self.per_process_config.test_dir
    if relative_test_dir != 'corpus':
      relative_test_dir = os.path.join('resources', relative_test_dir)

    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--build-dir',
        default=os.path.join('out', 'Debug'),
        help='relative path from the base source directory')

    parser.add_argument(
        '-j',
        default=multiprocessing.cpu_count(),
        dest='num_workers',
        type=int,
        help='run NUM_WORKERS jobs in parallel')

    parser.add_argument(
        '--disable-javascript',
        action='store_true',
        help='Prevents JavaScript from executing in PDF files.')

    parser.add_argument(
        '--disable-xfa',
        action='store_true',
        help='Prevents processing XFA forms.')

    parser.add_argument(
        '--render-oneshot',
        action='store_true',
        help='Sets whether to use the oneshot renderer.')

    parser.add_argument(
        '--run-skia-gold',
        action='store_true',
        default=False,
        help='When flag is on, skia gold tests will be run.')

    # TODO: Remove when pdfium recipe stops passing this argument
    parser.add_argument(
        '--gold_properties',
        default='',
        help='Key value pairs that are written to the top level of the JSON '
        'file that is ingested by Gold.')

    # TODO: Remove when pdfium recipe stops passing this argument
    parser.add_argument(
        '--gold_ignore_hashes',
        default='',
        help='Path to a file with MD5 hashes we wish to ignore.')

    parser.add_argument(
        '--regenerate_expected',
        action='store_true',
        help='Regenerates expected images. For each failing image diff, this '
        'will regenerate the most specific expected image file that exists. '
        'This also will suggest removals of unnecessary expected image files '
        'by renaming them with an additional ".bak" extension, although these '
        'removals should be reviewed manually. Use "git clean" to quickly deal '
        'with any ".bak" files.')

    parser.add_argument(
        '--reverse-byte-order',
        action='store_true',
        help='Run image-based tests using --reverse-byte-order.')

    parser.add_argument(
        '--ignore_errors',
        action='store_true',
        help='Prevents the return value from being non-zero '
        'when image comparison fails.')

    parser.add_argument(
        'inputted_file_paths',
        nargs='*',
        help='Path to test files to run, relative to '
        f'testing/{relative_test_dir}. If omitted, runs all test files under '
        f'testing/{relative_test_dir}.',
        metavar='relative/test/path')

    skia_gold.add_skia_gold_args(parser)

    self.per_process_config.options = parser.parse_args()

    finder = self.per_process_config.NewFinder()
    pdfium_test_path = self.per_process_config.GetPdfiumTestPath(finder)
    if not os.path.exists(pdfium_test_path):
      print(f"FAILURE: Can't find test executable '{pdfium_test_path}'")
      print('Use --build-dir to specify its location.')
      return 1
    self.per_process_config.InitializeFeatures(pdfium_test_path)

    self.per_process_state = _PerProcessState(self.per_process_config)
    shutil.rmtree(self.per_process_state.working_dir, ignore_errors=True)
    os.makedirs(self.per_process_state.working_dir)

    error_message = self.per_process_state.image_differ.CheckMissingTools(
        self.options.regenerate_expected)
    if error_message:
      print('FAILURE:', error_message)
      return 1

    self.resultdb = result_sink.TryInitClient()
    if self.resultdb:
      print('Detected ResultSink environment')

    # Collect test cases.
    walk_from_dir = finder.TestingDir(relative_test_dir)

    self.test_cases = TestCaseManager()
    self.execution_suppressed_cases = []
    input_file_re = re.compile('^.+[.](in|pdf)$')
    if self.options.inputted_file_paths:
      for file_name in self.options.inputted_file_paths:
        input_path = os.path.join(walk_from_dir, file_name)
        if not os.path.isfile(input_path):
          print(f"Can't find test file '{file_name}'")
          return 1

        self.test_cases.NewTestCase(input_path)
    else:
      for file_dir, _, filename_list in os.walk(walk_from_dir):
        for input_filename in filename_list:
          if input_file_re.match(input_filename):
            input_path = os.path.join(file_dir, input_filename)
            if self.IsExecutionSuppressed(input_path):
              self.execution_suppressed_cases.append(input_path)
              continue
            if not os.path.isfile(input_path):
              continue

            self.test_cases.NewTestCase(input_path)

    # Execute test cases.
    self.failures = []
    self.surprises = []
    self.skia_gold_successes = []
    self.skia_gold_unexpected_successes = []
    self.skia_gold_failures = []
    self.result_suppressed_cases = []

    if self.IsSkiaGoldEnabled():
      assert self.options.gold_output_dir
      # Clear out and create top level gold output directory before starting
      skia_gold.clear_gold_output_dir(self.options.gold_output_dir)

    with multiprocessing.Pool(
        processes=self.options.num_workers,
        initializer=_InitializePerProcessState,
        initargs=[self.per_process_config]) as pool:
      if self.per_process_config.test_type in TEXT_TESTS:
        test_function = _RunTextTest
      else:
        test_function = _RunPixelTest
      for result in pool.imap(test_function, self.test_cases):
        self.HandleResult(self.test_cases.GetTestCase(result.test_id), result)

    # Report test results.
    if self.surprises:
      self.surprises.sort()
      print('\nUnexpected Successes:')
      for surprise in self.surprises:
        print(surprise)

    if self.failures:
      self.failures.sort()
      print('\nSummary of Failures:')
      for failure in self.failures:
        print(failure)

    if self.skia_gold_unexpected_successes:
      self.skia_gold_unexpected_successes.sort()
      print('\nUnexpected Skia Gold Successes:')
      for surprise in self.skia_gold_unexpected_successes:
        print(surprise)

    if self.skia_gold_failures:
      self.skia_gold_failures.sort()
      print('\nSummary of Skia Gold Failures:')
      for failure in self.skia_gold_failures:
        print(failure)

    self._PrintSummary()

    if self.failures:
      if not self.options.ignore_errors:
        return 1

    return 0

  def _PrintSummary(self):
    number_test_cases = len(self.test_cases)
    number_failures = len(self.failures)
    number_suppressed = len(self.result_suppressed_cases)
    number_successes = number_test_cases - number_failures - number_suppressed
    number_surprises = len(self.surprises)
    print('\nTest cases executed:', number_test_cases)
    print('  Successes:', number_successes)
    print('  Suppressed:', number_suppressed)
    print('  Surprises:', number_surprises)
    print('  Failures:', number_failures)
    if self.IsSkiaGoldEnabled():
      number_gold_failures = len(self.skia_gold_failures)
      number_gold_successes = len(self.skia_gold_successes)
      number_gold_surprises = len(self.skia_gold_unexpected_successes)
      number_total_gold_tests = sum(
          [number_gold_failures, number_gold_successes, number_gold_surprises])
      print('\nSkia Gold Test cases executed:', number_total_gold_tests)
      print('  Skia Gold Successes:', number_gold_successes)
      print('  Skia Gold Surprises:', number_gold_surprises)
      print('  Skia Gold Failures:', number_gold_failures)
      skia_tester = self.per_process_state.GetSkiaGoldTester()
      if self.skia_gold_failures and skia_tester.IsTryjobRun():
        cl_triage_link = skia_tester.GetCLTriageLink()
        print('  Triage link for CL:', cl_triage_link)
        skia_tester.WriteCLTriageLink(cl_triage_link)
    print()
    print('Test cases not executed:', len(self.execution_suppressed_cases))

  def SetDeleteOutputOnSuccess(self, new_value):
    """Set whether to delete generated output if the test passes."""
    self.per_process_config.delete_output_on_success = new_value

  def SetEnforceExpectedImages(self, new_value):
    """Set whether to enforce that each test case provide an expected image."""
    self.per_process_config.enforce_expected_images = new_value


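# The test functions below are module-level (rather than `TestRunner` methods)
# so that `multiprocessing.Pool` can pickle them when dispatching work to
# worker processes.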
def _RunTextTest(test_case):
  """Runs a text test case."""
  test_case_runner = _TestCaseRunner(test_case)
  with test_case_runner:
    test_case_runner.test_result = test_case_runner.GenerateAndTest(
        test_case_runner.TestText)
  return test_case_runner.test_result


def _RunPixelTest(test_case):
  """Runs a pixel test case."""
  test_case_runner = _TestCaseRunner(test_case)
  with test_case_runner:
    test_case_runner.test_result = test_case_runner.GenerateAndTest(
        test_case_runner.TestPixel)
  return test_case_runner.test_result


# `_PerProcessState` singleton. This is initialized when creating the
# `multiprocessing.Pool()`. `TestRunner.Run()` creates its own separate
# instance of `_PerProcessState` as well.
_per_process_state = None


def _InitializePerProcessState(config):
  """Initializes the `_per_process_state` singleton."""
  global _per_process_state
  assert not _per_process_state
  _per_process_state = _PerProcessState(config)


@dataclass
class _PerProcessConfig:
  """Configuration for initializing `_PerProcessState`.

  Attributes:
    test_dir: The name of the test directory.
    test_type: The test type.
    delete_output_on_success: Whether to delete output on success.
    enforce_expected_images: Whether to enforce expected images.
    options: The parsed command line options.
    features: The list of features supported by `pdfium_test`.
  """
  test_dir: str
  test_type: str
  delete_output_on_success: bool = False
  enforce_expected_images: bool = False
  options: dict = None
  features: list = None

  def NewFinder(self):
    return common.DirectoryFinder(self.options.build_dir)

  def GetPdfiumTestPath(self, finder):
    return finder.ExecutablePath('pdfium_test')

  def InitializeFeatures(self, pdfium_test_path):
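    """Records the build's feature list via `pdfium_test --show-config`.

    The output is expected to be a single comma-separated line.
    """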
    output = subprocess.check_output([pdfium_test_path, '--show-config'],
                                     timeout=TEST_TIMEOUT)
    self.features = output.decode('utf-8').strip().split(',')


class _PerProcessState:
  """State defined per process."""

  def __init__(self, config):
    self.test_dir = config.test_dir
    self.test_type = config.test_type
    self.delete_output_on_success = config.delete_output_on_success
    self.enforce_expected_images = config.enforce_expected_images
    self.options = config.options
    self.features = config.features

    finder = config.NewFinder()
    self.pdfium_test_path = config.GetPdfiumTestPath(finder)
    self.fixup_path = finder.ScriptPath('fixup_pdf_template.py')
    self.text_diff_path = finder.ScriptPath('text_diff.py')
    self.font_dir = os.path.join(finder.TestingDir(), 'resources', 'fonts')
    self.third_party_font_dir = finder.ThirdPartyFontsDir()

    self.source_dir = finder.TestingDir()
    self.working_dir = finder.WorkingDir(os.path.join('testing', self.test_dir))

    self.test_suppressor = suppressor.Suppressor(
        finder, self.features, self.options.disable_javascript,
        self.options.disable_xfa)
    self.image_differ = pngdiffer.PNGDiffer(finder, self.features,
                                            self.options.reverse_byte_order)

    self.process_name = multiprocessing.current_process().name
    self.skia_tester = None

  def __getstate__(self):
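    """Disallows pickling; each worker process builds its own state."""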
    raise RuntimeError('Cannot pickle per-process state')

  def GetSkiaGoldTester(self):
    """Gets the `SkiaGoldTester` singleton for this worker."""
    if not self.skia_tester:
      self.skia_tester = skia_gold.SkiaGoldTester(
          source_type=self.test_type,
          skia_gold_args=self.options,
          process_name=self.process_name)
    return self.skia_tester


class _TestCaseRunner:
  """Runner for a single test case."""

  def __init__(self, test_case):
    self.test_case = test_case
    self.test_result = None
    self.duration_start = 0

    self.source_dir, self.input_filename = os.path.split(
        self.test_case.input_path)
    self.pdf_path = os.path.join(self.working_dir, f'{self.test_id}.pdf')
    self.actual_images = None

  def __enter__(self):
    self.duration_start = time.perf_counter_ns()
    return self

  def __exit__(self, exc_type, exc_value, traceback):
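    """Ensures a test result exists and records the test's duration."""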
    if not self.test_result:
      self.test_result = self.test_case.NewResult(
          result_types.UNKNOWN, reason='No test result recorded')
    duration = time.perf_counter_ns() - self.duration_start
    self.test_result.duration_milliseconds = duration * 1e-6

  @property
  def options(self):
    return _per_process_state.options

  @property
  def test_id(self):
    return self.test_case.test_id

  @property
  def working_dir(self):
    return _per_process_state.working_dir

  def IsResultSuppressed(self):
    return _per_process_state.test_suppressor.IsResultSuppressed(
        self.input_filename)

  def IsImageDiffSuppressed(self):
    return _per_process_state.test_suppressor.IsImageDiffSuppressed(
        self.input_filename)

  def GetImageMatchingAlgorithm(self):
    return _per_process_state.test_suppressor.GetImageMatchingAlgorithm(
        self.input_filename)

  def RunCommand(self, command, stdout=None):
    """Runs a test command.

    Args:
      command: The list of command arguments.
      stdout: Optional `file`-like object to send standard output.

    Returns:
      The test result.
    """

    # Standard output and error are directed to the test log. If `stdout` was
    # provided, redirect standard output to it instead.
    if stdout:
      assert stdout != subprocess.PIPE
      try:
        stdout.fileno()
      except OSError:
        # `stdout` doesn't have a file descriptor, so it can't be passed to
        # `subprocess.run()` directly.
        original_stdout = stdout
        stdout = subprocess.PIPE
      stderr = subprocess.PIPE
    else:
      stdout = subprocess.PIPE
      stderr = subprocess.STDOUT

    test_result = self.test_case.NewResult(result_types.PASS)
    try:
      run_result = subprocess.run(
          command,
          stdout=stdout,
          stderr=stderr,
          timeout=TEST_TIMEOUT,
          check=False)
      if run_result.returncode != 0:
        test_result.status = result_types.FAIL
        test_result.reason = 'Command {} exited with code {}'.format(
            run_result.args, run_result.returncode)
    except subprocess.TimeoutExpired as timeout_expired:
      run_result = timeout_expired
      test_result.status = result_types.TIMEOUT
      test_result.reason = 'Command {} timed out'.format(run_result.cmd)

    if stdout == subprocess.PIPE and stderr == subprocess.PIPE:
      # Copy captured standard output, if any, to the original `stdout`.
      if run_result.stdout:
        original_stdout.write(run_result.stdout)

    if not test_result.IsPass():
      # On failure, report captured output to the test log.
      if stderr == subprocess.STDOUT:
        test_result.log = run_result.stdout
      else:
        test_result.log = run_result.stderr
    return test_result

  def GenerateAndTest(self, test_function):
    """Generate test input and run pdfium_test."""
    test_result = self.Generate()
    if not test_result.IsPass():
      return test_result

    return test_function()

  def _RegenerateIfNeeded(self):
    if not self.options.regenerate_expected:
      return
    if self.IsResultSuppressed() or self.IsImageDiffSuppressed():
      return
    _per_process_state.image_differ.Regenerate(
        self.input_filename,
        self.source_dir,
        self.working_dir,
        image_matching_algorithm=self.GetImageMatchingAlgorithm())

  def Generate(self):
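    """Prepares the input PDF (and any .evt event file) for this test case.

    A `<test_id>.in` template is expanded into the working directory via
    `fixup_pdf_template.py`; a plain `.pdf` input is simply copied.
    """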
    input_event_path = os.path.join(self.source_dir, f'{self.test_id}.evt')
    if os.path.exists(input_event_path):
      output_event_path = f'{os.path.splitext(self.pdf_path)[0]}.evt'
      shutil.copyfile(input_event_path, output_event_path)

    template_path = os.path.join(self.source_dir, f'{self.test_id}.in')
    if not os.path.exists(template_path):
      if os.path.exists(self.test_case.input_path):
        shutil.copyfile(self.test_case.input_path, self.pdf_path)
      return self.test_case.NewResult(result_types.PASS)

    return self.RunCommand([
        sys.executable, _per_process_state.fixup_path,
        f'--output-dir={self.working_dir}', template_path
    ])

  def TestText(self):
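    """Runs a text test and diffs the output against `<test_id>_expected.txt`.

    The output must be empty when no expected file exists, or when JavaScript
    is disabled and the test is not suppressed.
    """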
    txt_path = os.path.join(self.working_dir, f'{self.test_id}.txt')
    with open(txt_path, 'w') as outfile:
      cmd_to_run = [
          _per_process_state.pdfium_test_path, '--send-events',
          f'--time={TEST_SEED_TIME}'
      ]

      if self.options.disable_javascript:
        cmd_to_run.append('--disable-javascript')

      if self.options.disable_xfa:
        cmd_to_run.append('--disable-xfa')

      cmd_to_run.append(self.pdf_path)
      test_result = self.RunCommand(cmd_to_run, stdout=outfile)
      if not test_result.IsPass():
        return test_result

    # If the expected file does not exist, the output is expected to be empty.
    expected_txt_path = os.path.join(self.source_dir,
                                     f'{self.test_id}_expected.txt')
    if not os.path.exists(expected_txt_path):
      return self._VerifyEmptyText(txt_path)

    # If JavaScript is disabled, the output should be empty.
    # However, if the test is suppressed and JavaScript is disabled, do not
    # verify that the text is empty so the suppressed test does not surprise.
    if self.options.disable_javascript and not self.IsResultSuppressed():
      return self._VerifyEmptyText(txt_path)

    return self.RunCommand([
        sys.executable, _per_process_state.text_diff_path, expected_txt_path,
        txt_path
    ])

  def _VerifyEmptyText(self, txt_path):
    with open(txt_path, "rb") as txt_file:
      txt_data = txt_file.read()

    if txt_data:
      return self.test_case.NewResult(
          result_types.FAIL, log=txt_data, reason=f'{txt_path} should be empty')

    return self.test_case.NewResult(result_types.PASS)

  # TODO(crbug.com/pdfium/1656): Remove when ready to fully switch over to
  # Skia Gold
  def TestPixel(self):
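    """Runs a pixel test.

    Renders each page to a PNG, optionally uploads the results to Skia Gold,
    and diffs the rendered images against the expected images.
    """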
    # Remove any existing generated images from previous runs.
    self.actual_images = _per_process_state.image_differ.GetActualFiles(
        self.input_filename, self.source_dir, self.working_dir)
    self._CleanupPixelTest()

    # Generate images.
    cmd_to_run = [
        _per_process_state.pdfium_test_path, '--send-events', '--png', '--md5',
        f'--time={TEST_SEED_TIME}'
    ]

    if 'use_ahem' in self.source_dir or 'use_symbolneu' in self.source_dir:
      cmd_to_run.append(f'--font-dir={_per_process_state.font_dir}')
    else:
      cmd_to_run.append(f'--font-dir={_per_process_state.third_party_font_dir}')
      cmd_to_run.append('--croscore-font-names')

    if self.options.disable_javascript:
      cmd_to_run.append('--disable-javascript')

    if self.options.disable_xfa:
      cmd_to_run.append('--disable-xfa')

    if self.options.render_oneshot:
      cmd_to_run.append('--render-oneshot')

    if self.options.reverse_byte_order:
      cmd_to_run.append('--reverse-byte-order')

    cmd_to_run.append(self.pdf_path)

    with BytesIO() as command_output:
      test_result = self.RunCommand(cmd_to_run, stdout=command_output)
      if not test_result.IsPass():
        return test_result

      test_result.image_artifacts = []
      for line in command_output.getvalue().splitlines():
        # Expect this format: MD5:<path to image file>:<hexadecimal MD5 hash>
        line = bytes.decode(line).strip()
        if line.startswith('MD5:'):
          image_path, md5_hash = line[4:].rsplit(':', 1)
          test_result.image_artifacts.append(
              self._NewImageArtifact(
                  image_path=image_path.strip(), md5_hash=md5_hash.strip()))

    if self.actual_images:
      image_diffs = _per_process_state.image_differ.ComputeDifferences(
          self.input_filename,
          self.source_dir,
          self.working_dir,
          image_matching_algorithm=self.GetImageMatchingAlgorithm())
      if image_diffs:
        test_result.status = result_types.FAIL
        test_result.reason = 'Images differ'

        # Merge image diffs into test result.
        diff_map = {}
        diff_log = []
        for diff in image_diffs:
          diff_map[diff.actual_path] = diff
          diff_log.append(f'{os.path.basename(diff.actual_path)} vs. ')
          if diff.expected_path:
            diff_log.append(f'{os.path.basename(diff.expected_path)}\n')
          else:
            diff_log.append('missing expected file\n')

        for artifact in test_result.image_artifacts:
          artifact.image_diff = diff_map.get(artifact.image_path)
        test_result.log = ''.join(diff_log).encode()

    elif _per_process_state.enforce_expected_images:
      if not self.IsImageDiffSuppressed():
        test_result.status = result_types.FAIL
        test_result.reason = 'Missing expected images'

    if not test_result.IsPass():
      self._RegenerateIfNeeded()
      return test_result

    if _per_process_state.delete_output_on_success:
      self._CleanupPixelTest()
    return test_result

  def _NewImageArtifact(self, *, image_path, md5_hash):
    artifact = ImageArtifact(image_path=image_path, md5_hash=md5_hash)

    if self.options.run_skia_gold:
      if _per_process_state.GetSkiaGoldTester().UploadTestResultToSkiaGold(
          artifact.GetSkiaGoldId(), artifact.image_path):
        artifact.skia_gold_status = result_types.PASS
      else:
        artifact.skia_gold_status = result_types.FAIL

    return artifact

  def _CleanupPixelTest(self):
    for image_file in self.actual_images:
      if os.path.exists(image_file):
        os.remove(image_file)


@dataclass
class TestCase:
  """Description of a test case to run.

  Attributes:
    test_id: A unique identifier for the test.
    input_path: The absolute path to the test file.
  """
  test_id: str
  input_path: str

  def NewResult(self, status, **kwargs):
    """Derives a new test result corresponding to this test case."""
    return TestResult(test_id=self.test_id, status=status, **kwargs)


@dataclass
class TestResult:
  """Results from running a test case.

  Attributes:
    test_id: The corresponding test case ID.
    status: The overall `result_types` status.
    duration_milliseconds: Test time in milliseconds.
    log: Optional log of the test's output.
    image_artifacts: Optional list of image artifacts.
    reason: Optional reason why the test failed.
  """
  test_id: str
  status: str
  duration_milliseconds: float = None
  log: str = None
  image_artifacts: list = field(default_factory=list)
  reason: str = None

  def IsPass(self):
    """Whether the test passed."""
    return self.status == result_types.PASS


@dataclass
class ImageArtifact:
  """Image artifact for a test result.

  Attributes:
    image_path: The absolute path to the image file.
    md5_hash: The MD5 hash of the pixel buffer.
    skia_gold_status: Optional Skia Gold status.
    image_diff: Optional image diff.
  """
  image_path: str
  md5_hash: str
  skia_gold_status: str = None
  image_diff: pngdiffer.ImageDiff = None

  def GetSkiaGoldId(self):
    # The output filename without image extension becomes the test ID. For
    # example, "/path/to/.../testing/corpus/example_005.pdf.0.png" becomes
    # "example_005.pdf.0".
    return _GetTestId(os.path.basename(self.image_path))

  def GetDiffStatus(self):
    return result_types.FAIL if self.image_diff else result_types.PASS

  def GetDiffReason(self):
    return self.image_diff.reason if self.image_diff else None

  def GetDiffArtifacts(self):
    if not self.image_diff:
      return None
    if not self.image_diff.expected_path or not self.image_diff.diff_path:
      return None
    return {
        'actual_image':
            _GetArtifactFromFilePath(self.image_path),
        'expected_image':
            _GetArtifactFromFilePath(self.image_diff.expected_path),
        'image_diff':
            _GetArtifactFromFilePath(self.image_diff.diff_path)
    }


class TestCaseManager:
  """Manages a collection of test cases."""

  def __init__(self):
    self.test_cases = {}

  def __len__(self):
    return len(self.test_cases)

  def __iter__(self):
    return iter(self.test_cases.values())

  def NewTestCase(self, input_path, **kwargs):
    """Creates and registers a new test case."""
    input_basename = os.path.basename(input_path)
    test_id = _GetTestId(input_basename)
    if test_id in self.test_cases:
      raise ValueError(
          f'Test ID "{test_id}" derived from "{input_basename}" must be unique')

    test_case = TestCase(test_id=test_id, input_path=input_path, **kwargs)
    self.test_cases[test_id] = test_case
    return test_case

  def GetTestCase(self, test_id):
    """Looks up a test case previously registered by `NewTestCase()`."""
    return self.test_cases[test_id]


def _GetTestId(input_basename):
  """Constructs a test ID by stripping the last extension from the basename."""
  return os.path.splitext(input_basename)[0]


def _GetArtifactFromFilePath(file_path):
  """Constructs a ResultSink artifact from a file path."""
  return {'filePath': file_path}
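

# Usage sketch (an assumption, not defined in this file): per-directory wrapper
# scripts are expected to construct a TestRunner and exit with Run()'s return
# code, along the lines of:
#
#   import sys
#   import test_runner
#
#   if __name__ == '__main__':
#     runner = test_runner.TestRunner('corpus')
#     runner.SetEnforceExpectedImages(True)
#     sys.exit(runner.Run())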