# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import dbus
import gzip
import logging
import os
import shutil
import subprocess
import tempfile

from autotest_lib.client.bin import test
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import file_utils
from autotest_lib.client.cros import debugd_util

import archiver
import configurator
import helpers
import fake_printer
import log_reader
import multithreaded_processor
from six.moves import range

# Timeout for printing documents in seconds
_FAKE_PRINTER_TIMEOUT = 200

# Prefix for CUPS printer name
_FAKE_PRINTER_ID = 'FakePrinter'

# First port number to use; this test uses consecutive port numbers,
# a different one for every PPD file
_FIRST_PORT_NUMBER = 9000

# Values are from platform/system_api/dbus/debugd/dbus-constants.h.
_CUPS_SUCCESS = 0

class platform_PrinterPpds(test.test):
    """
    This test gets a list of PPD files and a list of test documents. It
    tries to add a printer using each PPD file and to print all test
    documents on every printer created this way. Because the number of PPD
    files to test can be large (more than 3000), PPD files are tested
    simultaneously in many threads.

    """
    version = 3


    def _get_filenames_from_PPD_indexes(self):
        """
        Returns all PPD filenames from the SCS server.

        @returns a list of PPD filenames without duplicates

        """
        # extracts PPD filenames from all 20 index files (in parallel)
        outputs = self._processor.run(helpers.get_filenames_from_PPD_index,
                                      20)
        # joins obtained lists and performs deduplication
        ppd_files = set()
        for output in outputs:
            ppd_files.update(output)
        return list(ppd_files)


    def _calculate_full_path(self, path):
        """
        Converts the given path to an absolute path.

        @param path: a path set in configuration (relative, absolute or
                None)

        @returns absolute path or None if the input parameter was None

        """
        if path is None or os.path.isabs(path):
            return path
        path_current = os.path.dirname(os.path.realpath(__file__))
        return os.path.join(path_current, path)
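
    # A quick sketch of how the helper above resolves paths; the values
    # below are hypothetical examples, not paths used by the test:
    #
    #   self._calculate_full_path(None)         -> None
    #   self._calculate_full_path('/abs/docs')  -> '/abs/docs'
    #   self._calculate_full_path('ppds_all')   -> '<dir of this file>/ppds_all'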


    def initialize(self,
                   path_docs,
                   path_ppds,
                   path_digests=None,
                   debug_mode=False,
                   threads_count=8):
        """
        @param path_docs: path to a local directory with documents to print
        @param path_ppds: path to a local directory with PPD files to test;
                the directory content is shipped as a .tar.xz archive with
                the same name, which is unpacked by this method
        @param path_digests: path to a local directory with digests files
                for the test documents; if None, the content of printed
                documents is not verified
        @param debug_mode: if set to True, the autotest temporarily
                remounts the root partition in R/W mode and changes the
                CUPS configuration, which allows the test to extract
                pipelines for all tested PPDs and rerun them outside CUPS
        @param threads_count: number of threads to use

        """
        # Calculates absolute paths for all parameters
        self._location_of_test_docs = self._calculate_full_path(path_docs)
        self._location_of_PPD_files = self._calculate_full_path(path_ppds)
        location_of_digests_files = self._calculate_full_path(path_digests)

        # This object is used for running tasks in many threads
        # simultaneously
        self._processor = multithreaded_processor.MultithreadedProcessor(
                threads_count)

        # This object is responsible for parsing CUPS logs
        self._log_reader = log_reader.LogReader()

        # This object is responsible for the system configuration
        self._configurator = configurator.Configurator()
        self._configurator.configure(debug_mode)

        # Reads the list of test documents
        self._docs = helpers.list_entries_from_directory(
                        path=self._location_of_test_docs,
                        with_suffixes=('.pdf',),
                        nonempty_results=True,
                        include_directories=False)

        # Loads the list of PPD files to omit
        do_not_test_path = self._calculate_full_path('do_not_test.txt')
        do_not_test_set = set(helpers.load_lines_from_file(do_not_test_path))

        # Unpacks an archive with the PPD files
        path_archive = self._location_of_PPD_files + '.tar.xz'
        path_target_dir = self._calculate_full_path('.')
        file_utils.rm_dir_if_exists(os.path.join(path_target_dir, path_ppds))
        subprocess.call(['tar', 'xJf', path_archive, '-C', path_target_dir])
        # Loads PPD files from the unpacked directory
        self._ppds = helpers.list_entries_from_directory(
                        path=self._location_of_PPD_files,
                        with_suffixes=('.ppd', '.ppd.gz'),
                        nonempty_results=True,
                        include_directories=False)
        # Removes from the list all PPD files to omit and sorts it
        self._ppds = list(set(self._ppds) - do_not_test_set)
        self._ppds.sort()

        # Loads digests files
        self._digests = dict()
        self._sizes = dict()
        if location_of_digests_files is None:
            for doc_name in self._docs:
                self._digests[doc_name] = dict()
                self._sizes[doc_name] = dict()
        else:
            path_denylist = os.path.join(location_of_digests_files,
                                         'denylist.txt')
            denylist = helpers.load_lines_from_file(path_denylist)
            for doc_name in self._docs:
                digests_name = doc_name + '.digests'
                path = os.path.join(location_of_digests_files, digests_name)
                digests, sizes = helpers.parse_digests_file(path, denylist)
                self._digests[doc_name] = digests
                self._sizes[doc_name] = sizes

        # Prepares a working directory for pipelines
        if debug_mode:
            self._pipeline_dir = tempfile.mkdtemp(dir='/tmp')
        else:
            self._pipeline_dir = None
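
    # A minimal control-file sketch showing how this test might be invoked
    # from an autotest control file; the paths are hypothetical examples,
    # not shipped defaults:
    #
    #   job.run_test('platform_PrinterPpds',
    #                path_docs='docs',
    #                path_ppds='ppds_all',
    #                path_digests='digests',
    #                debug_mode=False,
    #                threads_count=8)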


    def cleanup(self):
        """
        Cleanup.

        """
        # Restores previous system settings
        self._configurator.restore()

        # Deletes directories with PPD files
        path_ppds = self._calculate_full_path('ppds_100')
        file_utils.rm_dir_if_exists(path_ppds)
        path_ppds = self._calculate_full_path('ppds_all')
        file_utils.rm_dir_if_exists(path_ppds)

        # Deletes the pipelines' working directory
        if self._pipeline_dir is not None:
            file_utils.rm_dir_if_exists(self._pipeline_dir)


    def run_once(self, path_outputs=None):
        """
        This is the main test function. It runs the testing procedure for
        every PPD file. Tests are run simultaneously in many threads.

        @param path_outputs: if not None, raw outputs sent to printers are
                dumped there; if the directory already exists, it is
                deleted and recreated

        @raises error.TestFail if at least one of the tests failed

        """
        # Sets the directory for output documents
        self._path_output_directory = self._calculate_full_path(path_outputs)
        if self._path_output_directory is not None:
            # Deletes the whole directory if it already exists
            file_utils.rm_dir_if_exists(self._path_output_directory)
            # Creates archivers
            self._archivers = dict()
            for doc_name in self._docs:
                path_for_archiver = os.path.join(
                        self._path_output_directory, doc_name)
                self._archivers[doc_name] = archiver.Archiver(
                        path_for_archiver, self._ppds, 50)
            # A place for new digests
            self._new_digests = dict()
            self._new_sizes = dict()
            for doc_name in self._docs:
                self._new_digests[doc_name] = dict()
                self._new_sizes[doc_name] = dict()

        # Runs tests for all PPD files (in parallel)
        outputs = self._processor.run(self._thread_test_PPD, len(self._ppds))

        # Analyzes tests' outputs, prints a summary report and builds a
        # list of PPD filenames that failed
        failures = []
        for i, output in enumerate(outputs):
            ppd_file = self._ppds[i]
            if output is not True:
                failures.append(ppd_file)
            else:
                output = 'OK'
            line = "%s: %s" % (ppd_file, output)
            logging.info(line)

        # Calculates digests files for the output documents (if dumped)
        if self._path_output_directory is not None:
            for doc_name in self._docs:
                path = os.path.join(self._path_output_directory,
                                    doc_name + '.digests')
                helpers.save_digests_file(path, self._new_digests[doc_name],
                                          self._new_sizes[doc_name],
                                          failures)

        # Raises an exception if at least one test failed
        if len(failures) > 0:
            failures.sort()
            raise error.TestFail(
                    'Test failed for %d PPD files: %s'
                    % (len(failures), ', '.join(failures)))
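
    # For example, with self._ppds == ['a.ppd', 'b.ppd'] and
    # outputs == [True, 'FAIL: timeout'], run_once() above logs
    #   a.ppd: OK
    #   b.ppd: FAIL: timeout
    # and raises error.TestFail for the single failure 'b.ppd'.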


    def _thread_test_PPD(self, task_id):
        """
        Runs the test procedure for a single PPD file.

        It retrieves the assigned PPD file and runs the test procedure
        for it.

        @param task_id: an index of the PPD file in self._ppds

        @returns True if the test passed or a description of the error
                (a string) if the test failed

        """
        # Gets the content of the PPD file
        try:
            ppd_file = self._ppds[task_id]
            if self._location_of_PPD_files is None:
                # Downloads the PPD file from the SCS server
                ppd_content = helpers.download_PPD_file(ppd_file)
            else:
                # Reads the PPD file from the local filesystem
                path_ppd = os.path.join(self._location_of_PPD_files,
                                        ppd_file)
                with open(path_ppd, 'rb') as ppd_file_descriptor:
                    ppd_content = ppd_file_descriptor.read()
        except BaseException as e:
            return 'MISSING PPD: ' + str(e)

        # Runs the test procedure
        try:
            port = _FIRST_PORT_NUMBER + task_id
            self._PPD_test_procedure(ppd_file, ppd_content, port)
        except BaseException as e:
            return 'FAIL: ' + str(e)

        return True
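
    # Each task gets its own TCP port, so fake printers started by parallel
    # threads never collide; e.g. task_id 0 listens on port 9000, task_id 1
    # on 9001, and so on (_FIRST_PORT_NUMBER + task_id).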


    def _PPD_test_procedure(self, ppd_name, ppd_content, port):
        """
        Test procedure for a single PPD file.

        It tries to run the following steps:
        1. Starts an instance of FakePrinter.
        2. Configures a CUPS printer.
        3. For each test document runs the following steps:
            3a. Sends the test document to the CUPS printer.
            3b. Fetches the raw document from the FakePrinter.
            3c. Parses CUPS logs and checks for any errors.
            3d. If self._pipeline_dir is set, extracts the executed CUPS
                pipeline, reruns it in a bash console and verifies every
                step and the final output.
            3e. If self._path_output_directory is set, saves the raw
                document and all intermediate steps in the provided
                directory.
            3f. If a digest is available, verifies the digest of the
                output document.
        4. Removes the CUPS printer and stops the FakePrinter.
        If the test fails, this method throws an exception.

        @param ppd_name: a name of the PPD file
        @param ppd_content: a content of the PPD file
        @param port: a port for the printer

        @raises Exception when the test fails

        """
        # Creates a work directory for external pipelines and saves the
        # PPD file there (if needed)
        path_ppd = None
        if self._pipeline_dir is not None:
            path_pipeline_ppd_dir = os.path.join(self._pipeline_dir,
                                                 ppd_name)
            os.makedirs(path_pipeline_ppd_dir)
            path_ppd = os.path.join(path_pipeline_ppd_dir, ppd_name)
            with open(path_ppd, 'wb') as file_ppd:
                file_ppd.write(ppd_content)
            if path_ppd.endswith('.gz'):
                subprocess.call(['gzip', '-d', path_ppd])
                path_ppd = path_ppd[:-3]

        try:
            # Starts the fake printer
            with fake_printer.FakePrinter(port) as printer:

                # Adds a CUPS printer manually with the given PPD file
                cups_printer_id = '%s_at_%05d' % (_FAKE_PRINTER_ID, port)
                result = debugd_util.iface().CupsAddManuallyConfiguredPrinter(
                        cups_printer_id,
                        'socket://127.0.0.1:%d' % port,
                        dbus.ByteArray(ppd_content))
                if result != _CUPS_SUCCESS:
                    raise Exception('valid_config - Could not set up a '
                            'valid printer: %d' % result)

                # Prints all test documents
                try:
                    for doc_name in self._docs:
                        # Full path to the test document
                        path_doc = os.path.join(
                                self._location_of_test_docs, doc_name)
                        # Sends the test document to the printer
                        argv = ['lp', '-d', cups_printer_id]
                        argv += [path_doc]
                        subprocess.call(argv)
                        # Prepares a workdir for the pipeline (if needed)
                        path_pipeline_workdir_temp = None
                        if self._pipeline_dir is not None:
                            path_pipeline_workdir = os.path.join(
                                    path_pipeline_ppd_dir, doc_name)
                            path_pipeline_workdir_temp = os.path.join(
                                    path_pipeline_workdir, 'temp')
                            os.makedirs(path_pipeline_workdir_temp)
                        # Gets the output document from the fake printer
                        doc = printer.fetch_document(_FAKE_PRINTER_TIMEOUT)
                        digest = helpers.calculate_digest(doc)
                        # Retrieves data from the log file
                        no_errors, logs, pipeline = \
                                self._log_reader.extract_result(
                                        cups_printer_id, path_ppd, path_doc,
                                        path_pipeline_workdir_temp)
                        # Archives obtained results in the output directory
                        if self._path_output_directory is not None:
                            self._archivers[doc_name].save_file(
                                    ppd_name, '.out', doc, apply_gzip=True)
                            self._archivers[doc_name].save_file(
                                    ppd_name, '.log', logs.encode())
                            if pipeline is not None:
                                self._archivers[doc_name].save_file(
                                        ppd_name, '.sh', pipeline.encode())
                            # Sets the new digest
                            self._new_digests[doc_name][ppd_name] = digest
                            self._new_sizes[doc_name][ppd_name] = len(doc)
                        # Fails if any of the CUPS filters failed
                        if not no_errors:
                            raise Exception('One of the CUPS filters failed')
                        # Reruns the pipeline and dumps intermediate outputs
                        if self._pipeline_dir is not None:
                            self._rerun_whole_pipeline(
                                    pipeline, path_pipeline_workdir,
                                    ppd_name, doc_name, digest)
                            shutil.rmtree(path_pipeline_workdir)
                        # Checks the document's digest (if known)
                        if ppd_name in self._digests[doc_name]:
                            digest_expected = self._digests[doc_name][ppd_name]
                            if digest_expected != digest:
                                message = 'Document\'s digest does not match'
                                if ppd_name in self._sizes[doc_name]:
                                    message += ', old size: ' + \
                                            str(self._sizes[doc_name][ppd_name])
                                message += ', new size: ' + str(len(doc))
                                raise Exception(message)
                        else:
                            # Simple validation
                            if len(doc) < 16:
                                raise Exception('Empty output')
                finally:
                    # Removes the CUPS printer
                    debugd_util.iface().CupsRemovePrinter(cups_printer_id)

            # The fake printer is stopped at the end of the "with" statement
        finally:
            # Finalizes archivers and cleans up
            if self._path_output_directory is not None:
                for doc_name in self._docs:
                    self._archivers[doc_name].finalize_prefix(ppd_name)
            # Cleans the pipelines' working directories
            if self._pipeline_dir is not None:
                shutil.rmtree(path_pipeline_ppd_dir)
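
    # The pipeline dumped by the procedure above can also be rerun by hand
    # for debugging, roughly as follows (the workdir path is a hypothetical
    # example of a directory created under self._pipeline_dir):
    #
    #   cd /tmp/<mkdtemp dir>/<ppd_name>/<doc_name>
    #   /bin/bash -e pipeline.sh
    #
    # which is what _rerun_whole_pipeline() below automates.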


    def _rerun_whole_pipeline(
            self, pipeline, path_workdir, ppd_name, doc_name, digest):
        """
        Reruns the whole pipeline outside the CUPS server.

        Reruns a printing pipeline dumped from CUPS. All intermediate
        outputs are dumped and archived for future analysis.

        @param pipeline: a pipeline as a bash script
        @param path_workdir: an existing directory to use as the working
                directory
        @param ppd_name: a filename prefix used for archivers
        @param doc_name: a document name, used to select a proper archiver
        @param digest: a digest of the output produced by CUPS
                (for comparison)

        @raises Exception in case of any errors

        """
        # Saves the pipeline to a file
        path_pipeline = os.path.join(path_workdir, 'pipeline.sh')
        with open(path_pipeline, 'wb') as file_pipeline:
            file_pipeline.write(pipeline.encode())
        # Runs the pipeline
        argv = ['/bin/bash', '-e', path_pipeline]
        ret = subprocess.Popen(argv, cwd=path_workdir).wait()
        # Finds the number of output files
        i = 1
        while os.path.isfile(os.path.join(path_workdir, "%d.doc.gz" % i)):
            i += 1
        files_count = i - 1
        # Reads the last output (to compare it with the output produced
        # by CUPS)
        if ret == 0:
            with gzip.open(os.path.join(path_workdir,
                    "%d.doc.gz" % files_count)) as last_file:
                content_digest = helpers.calculate_digest(last_file.read())
        # Archives all intermediate files (if desired)
        if self._path_output_directory is not None:
            for i in range(1, files_count + 1):
                self._archivers[doc_name].move_file(ppd_name, ".err%d" % i,
                        os.path.join(path_workdir, "%d.err" % i))
                self._archivers[doc_name].move_file(ppd_name, ".out%d.gz" % i,
                        os.path.join(path_workdir, "%d.doc.gz" % i))
        # Validation
        if ret != 0:
            raise Exception("A pipeline script returned %d" % ret)
        if content_digest != digest:
            raise Exception("The output returned by the pipeline differs "
                    "from the output produced by CUPS")
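
    # Assuming a pipeline with three filter steps, the workdir is expected
    # to contain 1.doc.gz, 2.doc.gz, 3.doc.gz and 1.err, 2.err, 3.err;
    # they are archived under the <ppd_name> prefix with .out<N>.gz/.err<N>
    # suffixes, and the last output (3.doc.gz here) is compared against the
    # digest computed from the document fetched by the fake printer.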