1#!/usr/bin/env python3 2# -*- coding: utf-8 -*- 3 4# 5# Copyright (c) 2023 Huawei Device Co., Ltd. 6# Licensed under the Apache License, Version 2.0 (the "License"); 7# you may not use this file except in compliance with the License. 8# You may obtain a copy of the License at 9# 10# http://www.apache.org/licenses/LICENSE-2.0 11# 12# Unless required by applicable law or agreed to in writing, software 13# distributed under the License is distributed on an "AS IS" BASIS, 14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15# See the License for the specific language governing permissions and 16# limitations under the License. 17 18 19import os 20import re 21import shutil 22import sys 23import subprocess 24import time 25import copy 26import queue 27import select 28import pty 29import pytest 30 31from datetime import datetime, timedelta 32 33sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "mylogger.py")) 34from mylogger import get_logger, parse_json 35 36Log = get_logger("build_option") 37current_file_path = os.path.abspath(__file__) 38script_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 39log_info = Log.info 40log_error = Log.error 41 42config = parse_json() 43if not config: 44 log_error("config file: build_example.json not exist") 45 sys.exit(0) 46 47out_dir = os.path.join(script_path, "out") 48exclude = config.get("build_option").get("exclude") 49try: 50 if os.path.exists(out_dir): 51 for tmp_dir in os.listdir(out_dir): 52 if tmp_dir in exclude: 53 continue 54 if os.path.isdir(os.path.join(out_dir, tmp_dir)): 55 shutil.rmtree(os.path.join(out_dir, tmp_dir)) 56 else: 57 os.remove(os.path.join(out_dir, tmp_dir)) 58except Exception as e: 59 log_error(e) 60 61 62@pytest.fixture() 63def init_build_env(): 64 def find_top_dir(): 65 cur_dir = os.getcwd() 66 while cur_dir != "/": 67 build_scripts = os.path.join( 68 cur_dir, 'build/scripts/build_package_list.json') 69 if os.path.exists(build_scripts): 70 return cur_dir 71 cur_dir = os.path.dirname(cur_dir) 72 73 os.chdir(find_top_dir()) 74 subprocess.run(['repo', 'forall', '-c', 'git reset --hard'], 75 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 76 subprocess.run(['repo', 'forall', '-c', 'git clean -dfx'], 77 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 78 79 80class TestBuildOption: 81 FLAGS = {"gn": {"pattern": r"Excuting gn command", "flag": False}, 82 "done": {"pattern": r"Done\. Made \d+ targets from \d+ files in \d+ms", "flag": False}, 83 "ninja": {"pattern": r"Excuting ninja command", "flag": False}, 84 "success": {"pattern": r"=====build successful=====", "flag": False} 85 } 86 87 try: 88 LOG_PATH = script_path + config.get("build_option").get("log_path") 89 CMD = script_path + config.get("build_option").get("common_cmd") 90 NINJIA_CMD = script_path + config.get("build_option").get("ninjia_cmd") 91 TIMEOUT = int(config.get("build_option").get("exec_timeout")) 92 TIME_OVER = int(config.get("build_option").get("file_time_intever")) 93 COMMAND_TYPE = config.get("build_option").get("cmd_type") 94 PTYFLAG = True if config.get("build_option").get("ptyflag").lower() == "true" else False 95 select_timeout = float(config.get("build_option").get("select_timeout")) 96 log_info("TIMEOUT:{}".format(TIMEOUT)) 97 log_info("COMMAND_TYPE:{}".format(COMMAND_TYPE)) 98 log_info("TIME_OVER:{}".format(TIME_OVER)) 99 log_info("PTYFLAG:{}".format(PTYFLAG)) 100 log_info("select_timeout:{}".format(select_timeout)) 101 except Exception as e: 102 log_error("build_example.json has error") 103 log_error(e) 104 raise e 105 106 def exec_command_select(self, cmd, timeout=60, ptyflag=False): 107 out_queue = queue.Queue() 108 log_info("select_exec cmd is :{}".format(" ".join(cmd))) 109 if not ptyflag: 110 try: 111 proc = subprocess.Popen( 112 cmd, 113 stdout=subprocess.PIPE, 114 stderr=subprocess.PIPE, 115 encoding="utf-8", 116 universal_newlines=True, 117 errors='ignore', 118 cwd=script_path 119 ) 120 start_time = time.time() 121 while True: 122 if timeout and time.time() - start_time > timeout: 123 raise Exception("exec cmd time out,select") 124 ready_to_read, _, _ = select.select([proc.stdout, proc.stderr], [], [], self.select_timeout) 125 for stream in ready_to_read: 126 output = stream.readline().strip() 127 if output: 128 out_queue.put(output) 129 if proc.poll() is not None: 130 break 131 returncode = proc.wait() 132 out_res = list(out_queue.queue) 133 return out_res, returncode 134 except Exception as err: 135 log_error("An error occurred: {}".format(err)) 136 raise Exception(err) 137 else: 138 try: 139 master, slave = pty.openpty() 140 proc = subprocess.Popen( 141 cmd, 142 stdin=slave, 143 stdout=slave, 144 stderr=slave, 145 encoding="utf-8", 146 universal_newlines=True, 147 errors='ignore', 148 cwd=script_path 149 ) 150 start_time = time.time() 151 incomplete_line = "" 152 while True: 153 if timeout and time.time() - start_time > timeout: 154 raise Exception("exec cmd time out,select") 155 ready_to_read, _, _ = select.select([master, ], [], [], self.select_timeout) 156 for stream in ready_to_read: 157 output_bytes = os.read(stream, 1024) 158 output = output_bytes.decode('utf-8') 159 lines = (incomplete_line + output).split("\n") 160 for line in lines[:-1]: 161 line = line.strip() 162 if line: 163 out_queue.put(line) 164 incomplete_line = lines[-1] 165 if proc.poll() is not None: 166 break 167 returncode = proc.wait() 168 out_res = list(out_queue.queue) 169 return out_res, returncode 170 except Exception as e: 171 log_error("An error occurred: {}".format(e)) 172 raise Exception(e) 173 174 @staticmethod 175 def exec_command_communicate(cmd, timeout=60): 176 try: 177 log_info("communicate_exec cmd is :{}".format(" ".join(cmd))) 178 proc = subprocess.Popen( 179 cmd, 180 stdout=subprocess.PIPE, 181 stderr=subprocess.PIPE, 182 encoding="utf-8", 183 universal_newlines=True, 184 errors='ignore', 185 cwd=script_path 186 ) 187 out, err = proc.communicate(timeout=timeout) 188 out_res = out.splitlines() + err.splitlines() 189 return out_res, proc.returncode 190 except Exception as e: 191 log_error("An error occurred: {}".format(e)) 192 raise Exception("exec cmd time out,communicate") 193 194 def exec_command(self, cmd, ptyflag=PTYFLAG, timeout=TIMEOUT): 195 if TestBuildOption.COMMAND_TYPE == "select": 196 return self.exec_command_select(cmd, timeout=timeout, ptyflag=ptyflag) 197 else: 198 return self.exec_command_communicate(cmd, timeout=timeout) 199 200 @staticmethod 201 def resolve_res(cmd_res, flag_res): 202 for line_count, line in enumerate(cmd_res): 203 for flag_name, value in flag_res.items(): 204 re_match = re.search(value["pattern"], line) 205 if re_match: 206 log_info("【match success {}】:{}\n".format(line_count, line)) # 输出命令终端的显示 207 if len(re_match.groups()) > 0: 208 if isinstance(flag_res[flag_name]["flag"], bool): 209 flag_res[flag_name]["flag"] = [re_match.group(1)] 210 else: 211 data = flag_res[flag_name]["flag"] 212 data.append(re_match.group(1)) 213 flag_res[flag_name]["flag"] = data 214 else: 215 flag_res[flag_name]["flag"] = True 216 return flag_res 217 218 @staticmethod 219 def check_flags(flags, expect_dict=None, returncode=0): 220 new_dict = copy.deepcopy(flags) 221 if returncode != 0: 222 log_error("returncode != 0") 223 return 1 224 if expect_dict: 225 error_count = 0 226 for k in expect_dict.keys(): 227 flags.pop(k) 228 if k in new_dict and new_dict[k]["flag"] != expect_dict[k]: 229 error_count += 1 230 if error_count != 0: 231 log_error("【actual_result】:{}\n".format(new_dict)) 232 return 1 233 check_li = [item for item in flags.values() if not item["flag"]] 234 log_info("【expect_result】:{}\n".format(expect_dict)) 235 log_info("【actual_result】:{}\n".format(new_dict)) 236 if len(check_li) > 0: 237 return 1 238 return 0 239 240 @staticmethod 241 def is_exist(path): 242 if os.path.exists(path): 243 return True 244 return False 245 246 @staticmethod 247 def same_element(list1, list2): 248 for el in list1: 249 if el not in list2: 250 return False 251 return True 252 253 def get_match_result(self, cmd, para_type, para_value, ptyflag=PTYFLAG): 254 cmd_res, returncode = self.exec_command(cmd, ptyflag=ptyflag) 255 before_flags, expect_dict = self.get_match_flags(para_type, para_value) 256 flag_res = self.resolve_res(cmd_res, before_flags) 257 result = self.check_flags(flag_res, expect_dict, returncode) 258 if result == 1: 259 self.print_error_line(cmd_res) 260 else: 261 self.print_error_line(cmd_res, is_success=True) 262 return result 263 264 @staticmethod 265 def print_error_line(cmd_res, is_success=False): 266 if is_success: 267 for ind, line in enumerate(cmd_res): 268 log_info("【{}】:{}".format(ind, line)) 269 else: 270 for ind, line in enumerate(cmd_res): 271 log_error("【{}】:{}".format(ind, line)) 272 273 def get_match_flags(self, para_type, para_value): 274 method_name = "get_{}_flags".format(para_type) 275 if hasattr(self, method_name): 276 method = self.__getattribute__(method_name) 277 flags, expect_dict = method(para_value) 278 return flags, expect_dict 279 280 def get_common_spec_result(self, option, cmd, para_type=None, ptyflag=PTYFLAG): 281 if not para_type: 282 flag_res, expect_dict = self.get_common_flags(option, check_file=True) 283 else: 284 flag_res, expect_dict = self.get_match_flags(para_type, option) 285 cmd_res, returncode = self.exec_command(cmd, ptyflag=ptyflag) 286 resolve_result = self.resolve_res(cmd_res, flag_res) 287 result = self.check_flags(resolve_result, expect_dict, returncode) 288 if result == 1: 289 self.print_error_line(cmd_res) 290 else: 291 self.print_error_line(cmd_res, is_success=True) 292 return resolve_result, result, cmd_res 293 294 @staticmethod 295 def get_build_only_gn_flags(para_value): 296 flags = copy.deepcopy(TestBuildOption.FLAGS) 297 expect_dict = {} 298 299 if para_value.lower() == "true": 300 expect_dict["ninja"] = False 301 else: 302 expect_dict["ninja"] = True 303 304 return flags, expect_dict 305 306 @staticmethod 307 def get_ccache_flags(para_value): 308 flags = copy.deepcopy(TestBuildOption.FLAGS) 309 expect_dict = {} 310 flags["ccache"] = {"pattern": r"Excuting gn command.*ohos_build_enable_ccache=true", "flag": False} 311 312 if para_value.lower() == "true": 313 expect_dict["ccache"] = True 314 else: 315 expect_dict["ccache"] = False 316 317 return flags, expect_dict 318 319 @staticmethod 320 def get_target_cpu_flags(para_value): 321 flags = copy.deepcopy(TestBuildOption.FLAGS) 322 expect_dict = {"loader": True} 323 324 flags["loader"] = {"pattern": r"loader args.*'target_cpu={}".format(para_value), "flag": False} 325 326 return flags, expect_dict 327 328 @staticmethod 329 def get_rename_last_log_flags(para_value): 330 flags = copy.deepcopy(TestBuildOption.FLAGS) 331 expect_dict = {} 332 return flags, expect_dict 333 334 @staticmethod 335 def get_enable_pycache_flags(para_value): 336 flags = copy.deepcopy(TestBuildOption.FLAGS) 337 expect_dict = {} 338 if para_value.lower() == "true": 339 expect_dict["pycache"] = True 340 else: 341 expect_dict["pycache"] = False 342 flags["pycache"] = {"pattern": r"Starting pycache daemon at", "flag": False} 343 flags["os_level"] = {"pattern": r"loader args.*os_level=([a-zA-Z]+)\'", "flag": False} 344 flags["root_dir"] = {"pattern": r"""loader args.*source_root_dir="([a-zA-Z\d/\\_]+)""""", "flag": False} 345 flags["gn_dir"] = {"pattern": r"""loader args.*gn_root_out_dir="([a-zA-Z\d/\\_]+)""""", "flag": False} 346 flags["start_end_time"] = {"pattern": r"(\d+-\d+-\d+ \d+:\d+:\d+)", "flag": False} 347 flags["cost_time"] = {"pattern": r"Cost time:.*(\d+:\d+:\d+)", "flag": False} 348 return flags, expect_dict 349 350 @staticmethod 351 def get_build_target_flags(para_value): 352 flags = copy.deepcopy(TestBuildOption.FLAGS) 353 flags["use_thin"] = {"pattern": r"Excuting gn command.*use_thin_lto=false.*", "flag": False} 354 flags["ninja_build_target"] = {"pattern": r"Excuting ninja command.*{}$".format(para_value), "flag": False} 355 expect_dict = {} 356 test_target_list = ['build_all_test_pkg', 'package_testcase', 'package_testcase_mlf'] 357 358 if para_value.endswith('make_test') or para_value.split(':')[-1] in test_target_list: 359 expect_dict["use_thin"] = True 360 expect_dict["ninja_build_target"] = True 361 else: 362 expect_dict["use_thin"] = False 363 expect_dict["ninja_build_target"] = True 364 return flags, expect_dict 365 366 @staticmethod 367 def get_ninja_args_flags(para_value): 368 flags = copy.deepcopy(TestBuildOption.FLAGS) 369 expect_dict = {"ninja_args": True} 370 flags["ninja_args"] = {"pattern": r"Excuting ninja command.*{}".format(para_value), "flag": False} 371 372 return flags, expect_dict 373 374 @staticmethod 375 def get_full_compilation_flags(para_value): 376 flags = copy.deepcopy(TestBuildOption.FLAGS) 377 flags["full_compilation_gn"] = {"pattern": r"Excuting gn command.*use_thin_lto=false.*", "flag": False} 378 flags["full_compilation_ninja"] = {"pattern": r"Excuting ninja command.*make_all make_test$", "flag": False} 379 expect_dict = {} 380 381 if para_value in ["", "True"]: 382 expect_dict["full_compilation_gn"] = True 383 expect_dict["full_compilation_ninja"] = True 384 else: 385 expect_dict["full_compilation_gn"] = False 386 expect_dict["full_compilation_ninja"] = False 387 388 return flags, expect_dict 389 390 @staticmethod 391 def get_strict_mode_flags(para_value): 392 flags = copy.deepcopy(TestBuildOption.FLAGS) 393 expect_dict = {} 394 395 return flags, expect_dict 396 397 @staticmethod 398 def get_scalable_build_flags(para_value): 399 flags = copy.deepcopy(TestBuildOption.FLAGS) 400 expect_dict = {} 401 402 return flags, expect_dict 403 404 @staticmethod 405 def get_build_example_flags(para_value): 406 flags = copy.deepcopy(TestBuildOption.FLAGS) 407 build_example_file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname( 408 os.path.abspath(__file__)))), "subsystem_config_example.json") 409 flags["build_example"] = { 410 "pattern": r"loader args.*example_subsystem_file=.*{}.*".format(build_example_file_path), "flag": False} 411 expect_dict = {} 412 413 if para_value.lower() == "true": 414 expect_dict["build_example"] = True 415 else: 416 expect_dict["build_example"] = False 417 418 return flags, expect_dict 419 420 @staticmethod 421 def get_build_platform_name_flags(para_value): 422 flags = copy.deepcopy(TestBuildOption.FLAGS) 423 expect_dict = {} 424 425 if para_value == "phone": 426 flags["build_platform"] = { 427 "pattern": r"loader args.*build_platform_name=phone", "flag": False} 428 expect_dict["build_platform"] = True 429 430 return flags, expect_dict 431 432 @staticmethod 433 def get_build_xts_flags(para_value): 434 flags = copy.deepcopy(TestBuildOption.FLAGS) 435 expect_dict = {} 436 437 flags["build_xts"] = {"pattern": r"loader args.*build_xts={}.*".format(para_value.capitalize()), "flag": False} 438 439 return flags, expect_dict 440 441 @staticmethod 442 def get_ignore_api_check_flags(para_value): 443 flags = copy.deepcopy(TestBuildOption.FLAGS) 444 expect_dict = {} 445 446 if para_value == "": 447 flags["ignore_api_check"] = {"pattern": r"loader args.*ignore_api_check=\['xts', 'common', 'testfwk'\]", 448 "flag": False} 449 else: 450 flags["ignore_api_check"] = { 451 "pattern": r"loader args.*ignore_api_check=(.*)\",", 452 "flag": False} 453 454 return flags, expect_dict 455 456 @staticmethod 457 def get_load_test_config_flags(para_value): 458 flags = copy.deepcopy(TestBuildOption.FLAGS) 459 expect_dict = {} 460 461 flags["load_test"] = {"pattern": r"loader args.*load_test_config={}.*".format(para_value.capitalize()), 462 "flag": False} 463 464 return flags, expect_dict 465 466 @staticmethod 467 def get_build_type_flags(para_value): 468 flags = copy.deepcopy(TestBuildOption.FLAGS) 469 expect_dict = {} 470 flags["build_type_debug"] = {"pattern": r"Excuting gn command.*is_debug=true", 471 "flag": False} 472 flags["build_type_profile"] = {"pattern": r"Excuting gn command.*is_profile=true", 473 "flag": False} 474 flags["build_type_none"] = { 475 "pattern": r'Excuting gn command.*ohos_build_type=\\"debug\\"', 476 "flag": False} 477 478 if para_value == "debug": 479 expect_dict["build_type_debug"] = True 480 expect_dict["build_type_profile"] = False 481 expect_dict["build_type_none"] = True 482 elif para_value == "profile": 483 expect_dict["build_type_debug"] = False 484 expect_dict["build_type_profile"] = True 485 expect_dict["build_type_none"] = True 486 else: 487 expect_dict["build_type_debug"] = False 488 expect_dict["build_type_profile"] = False 489 expect_dict["build_type_none"] = True 490 491 return flags, expect_dict 492 493 @staticmethod 494 def get_log_level_flags(para_value): 495 flags = copy.deepcopy(TestBuildOption.FLAGS) 496 497 flags["tracelog"] = {"pattern": r"Excuting gn command.*--tracelog=.*/gn_trace.log.*--ide=json", "flag": False} 498 flags["ninja_v"] = {"pattern": r"Excuting ninja command.*-v.*", "flag": False} 499 expect_dict = {} 500 501 if para_value == "info": 502 expect_dict["tracelog"] = False 503 expect_dict["ninja_v"] = False 504 elif para_value == "debug": 505 expect_dict["tracelog"] = True 506 expect_dict["ninja_v"] = True 507 508 return flags, expect_dict 509 510 @staticmethod 511 def get_test_flags(para_value): 512 flags = copy.deepcopy(TestBuildOption.FLAGS) 513 expect_dict = {} 514 flags["notest"] = {"pattern": r'Excuting gn command.*ohos_test_args=\\"notest\\"', 515 "flag": False} 516 flags["xts"] = {"pattern": r'Excuting gn command.*ohos_xts_test_args=\\"xxx\\"', 517 "flag": False} 518 519 if para_value == "": 520 expect_dict["notest"] = False 521 expect_dict["xts"] = False 522 elif para_value == "notest xxx": 523 expect_dict["notest"] = True 524 expect_dict["xts"] = False 525 elif para_value in ["xts xxx", "xxx xts"]: 526 expect_dict["notest"] = False 527 expect_dict["xts"] = True 528 elif para_value == "xxx ccc": 529 expect_dict["notest"] = False 530 expect_dict["xts"] = False 531 532 return flags, expect_dict 533 534 @staticmethod 535 def get_gn_args_flags(para_value): 536 flags = copy.deepcopy(TestBuildOption.FLAGS) 537 expect_dict = {} 538 539 flags["device_type"] = { 540 "pattern": r'Excuting gn command.*device_type=\\"default\\"', "flag": False} 541 flags["build_variant"] = { 542 "pattern": r'Excuting gn command.*build_variant=\\"root\\"', "flag": False} 543 flags["para"] = { 544 "pattern": r'Excuting gn command.*{}'.format(para_value), "flag": False} 545 546 return flags, expect_dict 547 548 @staticmethod 549 def get_fast_rebuild_flags(para_value): 550 flags = copy.deepcopy(TestBuildOption.FLAGS) 551 expect_dict = {} 552 553 if para_value.lower() == "true" or para_value == "": 554 expect_dict["gn"] = False 555 expect_dict["done"] = False 556 return flags, expect_dict 557 558 @staticmethod 559 def get_skip_partlist_check_flags(para_value): 560 flags = copy.deepcopy(TestBuildOption.FLAGS) 561 expect_dict = {} 562 partlist_flag = True if para_value.lower() == "true" else False 563 flags["partlist"] = {"pattern": r"loader args.*skip_partlist_check={}".format(partlist_flag), "flag": False} 564 return flags, expect_dict 565 566 @staticmethod 567 def get_deps_guard_flags(para_value): 568 flags = copy.deepcopy(TestBuildOption.FLAGS) 569 expect_dict = {} 570 flags["os_level"] = {"pattern": r"loader args.*os_level=([a-zA-Z]+)\'", "flag": False} 571 return flags, expect_dict 572 573 @staticmethod 574 def get_compute_overlap_rate_flags(para_value): 575 flags = copy.deepcopy(TestBuildOption.FLAGS) 576 flags["c_targets"] = {"pattern": r"c targets overlap rate statistics", "flag": False} 577 flags["c_overall"] = {"pattern": r"c overall build overlap rate", "flag": False} 578 expect_dict = {} 579 580 if para_value.lower() in ("true", ""): 581 expect_dict["c_targets"] = True 582 expect_dict["c_overall"] = True 583 else: 584 expect_dict["c_targets"] = False 585 expect_dict["c_overall"] = False 586 return flags, expect_dict 587 588 @staticmethod 589 def get_stat_ccache_flags(para_value): 590 flags = copy.deepcopy(TestBuildOption.FLAGS) 591 expect_dict = {} 592 flags["ccache_dir"] = {"pattern": r"ccache_dir =.*, ccache_exec =.*", "flag": False} 593 flags["ccache_summary"] = {"pattern": r"ccache summary", "flag": False} 594 595 if para_value.lower() in ("true", ""): 596 expect_dict["ccache_dir"] = True 597 expect_dict["ccache_summary"] = True 598 else: 599 expect_dict["ccache_dir"] = False 600 expect_dict["ccache_summary"] = False 601 602 return flags, expect_dict 603 604 @staticmethod 605 def get_keep_ninja_going_flags(para_value): 606 flags = copy.deepcopy(TestBuildOption.FLAGS) 607 expect_dict = {} 608 if para_value.lower() == "true": 609 flags.setdefault("ninja", {}).setdefault("pattern", r"Excuting ninja command.*-k1000000.*") 610 611 return flags, expect_dict 612 613 @staticmethod 614 def get_common_flags(para_value, check_file=False): 615 flags = copy.deepcopy(TestBuildOption.FLAGS) 616 expect_dict = {} 617 if check_file: 618 flags["os_level"] = {"pattern": r"loader args.*os_level=([a-zA-Z]+)\'", "flag": False} 619 flags["root_dir"] = {"pattern": r"""loader args.*source_root_dir="([a-zA-Z\d/\\_]+)""""", "flag": False} 620 flags["gn_dir"] = {"pattern": r"""loader args.*gn_root_out_dir="([a-zA-Z\d/\\_]+)""""", "flag": False} 621 flags["start_end_time"] = {"pattern": r"(\d+-\d+-\d+ \d+:\d+:\d+)", "flag": False} 622 flags["cost_time"] = {"pattern": r"Cost time:.*(\d+:\d+:\d+)", "flag": False} 623 return flags, expect_dict 624 625 @staticmethod 626 def check_file_res(resolve_result, file_list, is_real_path=False, time_over=TIME_OVER): 627 root_dir = resolve_result["root_dir"]["flag"][0] 628 gn_dir = resolve_result["gn_dir"]["flag"][0] 629 start_time_str = resolve_result["start_end_time"]["flag"][0] 630 end_time_str = resolve_result["start_end_time"]["flag"][-1] 631 632 start_time = datetime.strptime(start_time_str, "%Y-%m-%d %H:%M:%S") 633 end_time = datetime.strptime(end_time_str, "%Y-%m-%d %H:%M:%S") 634 635 start_timestamp = int(datetime.timestamp(start_time)) 636 end_timestamp = int(datetime.timestamp(end_time)) 637 638 file_list_new = [] 639 for tmp_file in file_list: 640 real_path = tmp_file if is_real_path else os.path.join(root_dir, gn_dir, tmp_file) 641 if os.path.exists(real_path): 642 file_list_new.append(real_path) 643 if not file_list_new: 644 log_info("all file {} not exist".format(file_list)) 645 return True 646 file_timestamp_li = {tmp_file: int(os.stat(tmp_file).st_mtime) for tmp_file in file_list_new} 647 648 cost_time_str = resolve_result["cost_time"]["flag"][0] 649 cost_time = datetime.strptime(cost_time_str, "%H:%M:%S") 650 cost_time_int = timedelta(hours=cost_time.hour, minutes=cost_time.minute, seconds=cost_time.second) 651 total_seconds = int(cost_time_int.total_seconds()) 652 new_start_timestamp = end_timestamp - total_seconds 653 log_info("log_cost_time:{}s".format(total_seconds)) 654 log_info("start_timestamp:{}".format(start_timestamp)) 655 log_info("new_start_timestamp:{}".format(new_start_timestamp)) 656 log_info("end_timestamp:{}".format(end_timestamp)) 657 file_flag = False 658 file_tmp_flag_li = [] 659 660 for file_tmp, file_timestamp in file_timestamp_li.items(): 661 log_info("{}:{}".format(file_tmp, file_timestamp)) 662 file_tmp_flag = new_start_timestamp - time_over <= file_timestamp <= end_timestamp + time_over 663 file_tmp_flag_li.append(file_tmp_flag) 664 665 if all(file_tmp_flag_li): 666 file_flag = True 667 668 return file_flag 669 670 @pytest.mark.parametrize('cpu_para', ['arm', 'arm64', 'x86_64']) 671 def test_target_cpu(self, cpu_para): 672 """ 673 test target_cpu parameter 674 """ 675 cmd = self.CMD.format('--target-cpu', cpu_para).split() 676 677 result = self.get_match_result(cmd, "target_cpu", cpu_para) 678 679 assert result == 0, "target cpu para {} failed".format(cpu_para) 680 681 @pytest.mark.parametrize('ccache_para', ['True', 'False']) 682 def test_ccache(self, ccache_para): 683 """ 684 test ccache_para parameter 685 """ 686 cmd = self.CMD.format('--ccache', ccache_para).split() 687 688 result = self.get_match_result(cmd, "ccache", ccache_para) 689 690 assert result == 0, "ccache para {} failed".format(ccache_para) 691 692 @pytest.mark.parametrize('rename_last_log_para', ['True', 'False']) 693 def test_rename_last_log(self, rename_last_log_para): 694 """ 695 test rename_last_log parameter 696 """ 697 cmd = self.CMD.format('--rename-last-log', rename_last_log_para).split() 698 mtime = "" 699 file_name = "" 700 701 if self.is_exist(self.LOG_PATH): 702 mtime = os.stat(self.LOG_PATH).st_mtime 703 file_name = '{}/build.{}.log'.format(self.LOG_PATH, mtime) 704 log_info("test_rename_last_log,file name is {}".format(file_name)) 705 result = self.get_match_result(cmd, "rename_last_log", rename_last_log_para) 706 new_path = os.path.join(os.path.dirname(self.LOG_PATH), "build.{}.log".format(mtime)) 707 log_info("test_rename_last_log,new path is {}".format(new_path)) 708 709 if rename_last_log_para == 'True': 710 assert self.is_exist(new_path) and result == 0, "rename_last_log para {} failed".format( 711 rename_last_log_para) 712 elif rename_last_log_para == 'False': 713 assert not self.is_exist(new_path) and result == 0, "rename_last_log para {} failed".format( 714 rename_last_log_para) 715 716 @pytest.mark.parametrize('build_target', ['', 'package_testcase']) 717 def test_build_target(self, build_target): 718 """ 719 test build_target parameter 720 """ 721 cmd = self.CMD.format('--build-target', build_target).split() 722 723 result = self.get_match_result(cmd, "build_target", build_target) 724 725 assert result == 0, "build target para {} failed".format(build_target) 726 727 @pytest.mark.parametrize('ninja_args', ['-dkeeprsp']) 728 def test_ninja_args(self, ninja_args): 729 """ 730 test ninja_args parameter 731 """ 732 cmd = self.NINJIA_CMD.format(ninja_args).split() 733 734 result = self.get_match_result(cmd, "ninja_args", ninja_args) 735 736 assert result == 0, "ninja args para {} failed".format(ninja_args) 737 738 @pytest.mark.parametrize('full_compilation', ['True', 'False', '']) 739 def test_full_compilation(self, full_compilation): 740 """ 741 test full_compilation parameter 742 """ 743 cmd = self.CMD.format('--full-compilation', full_compilation).split() 744 745 result = self.get_match_result(cmd, "full_compilation", full_compilation) 746 747 assert result == 0, "full compilation para {} failed".format(full_compilation) 748 749 @pytest.mark.parametrize('strict_mode', ['True', 'False', 'false']) 750 def test_strict_mode(self, strict_mode): 751 """ 752 test strict_mode parameter 753 """ 754 cmd = self.CMD.format('--strict-mode', strict_mode).split() 755 756 result = self.get_match_result(cmd, "strict_mode", strict_mode) 757 758 assert result == 0, "strict mode para {} failed".format(strict_mode) 759 760 @pytest.mark.parametrize('scalable_build', ['True', 'False', 'false']) 761 def test_scalable_build(self, scalable_build): 762 """ 763 test scalable_build parameter 764 """ 765 cmd = self.CMD.format('--scalable-build', scalable_build).split() 766 767 result = self.get_match_result(cmd, "scalable_build", scalable_build) 768 769 assert result == 0, "scalable build para {} failed".format(scalable_build) 770 771 @pytest.mark.parametrize('build_example', ['True', 'False', 'true', 'false']) 772 def test_build_example(self, build_example): 773 """ 774 test build_example parameter 775 """ 776 cmd = self.CMD.format('--build-example', build_example).split() 777 778 result = self.get_match_result(cmd, "build_example", build_example) 779 780 assert result == 0, "build example para {} failed".format(build_example) 781 782 @pytest.mark.parametrize('build_platform_name', ['phone']) 783 def test_build_platform_name(self, build_platform_name): 784 """ 785 test build_platform_name parameter 786 """ 787 cmd = self.CMD.format('--build-platform-name', build_platform_name).split() 788 789 result = self.get_match_result(cmd, "build_platform_name", build_platform_name) 790 791 assert result == 0, "build platform name para {} failed".format(build_platform_name) 792 793 @pytest.mark.parametrize('build_xts', ['True', 'False', 'true', 'false']) 794 def test_build_xts(self, build_xts): 795 """ 796 test build_xts parameter 797 """ 798 cmd = self.CMD.format('--build-xts', build_xts).split() 799 800 result = self.get_match_result(cmd, "build_xts", build_xts) 801 802 assert result == 0, "build xts para {} failed".format(build_xts) 803 804 @pytest.mark.parametrize('ignore_api_check', ['common xts', '']) 805 def test_ignore_api_check(self, ignore_api_check): 806 """ 807 test ignore_api_check parameter 808 """ 809 para_list = ignore_api_check.split() 810 cmd = self.CMD.format('--ignore-api-check', ignore_api_check).split() 811 resolve_result, result, _ = self.get_common_spec_result(ignore_api_check, cmd, 812 para_type="ignore_api_check") 813 if result != 0: 814 assert result == 0, "ignore api check para {} failed".format(ignore_api_check) 815 else: 816 if ignore_api_check: 817 ignore_str = resolve_result["ignore_api_check"]["flag"][0] # ['xts', 'common'] 818 log_info("ignore_str is {}".format(ignore_str)) 819 ignor_li = eval(ignore_str) 820 log_info("ignor_li is {0},type is {1}".format(ignor_li, type(ignor_li))) 821 assert self.same_element(para_list, ignor_li) and result == 0, "ignore api check para {} failed".format( 822 ignore_api_check) 823 824 @pytest.mark.parametrize('load_test_config', ['True', 'False', 'true', 'false']) 825 def test_load_test_config(self, load_test_config): 826 """ 827 test load_test_config parameter 828 """ 829 cmd = self.CMD.format('--load-test-config', load_test_config).split() 830 831 result = self.get_match_result(cmd, "load_test_config", load_test_config) 832 833 assert result == 0, "load test config para {} failed".format(load_test_config) 834 835 @pytest.mark.parametrize('build_type', ['debug', 'release', 'profile']) 836 def test_build_type(self, build_type): 837 """ 838 test build_type parameter 839 """ 840 cmd = self.CMD.format('--build-type', build_type).split() 841 result = self.get_match_result(cmd, "build_type", build_type) 842 843 assert result == 0, "build type para {} failed".format(build_type) 844 845 @pytest.mark.parametrize('log_level', ['info', 'debug']) 846 def test_log_level(self, log_level): 847 """ 848 test log_level parameter 849 """ 850 cmd = self.CMD.format('--log-level', log_level).split() 851 852 result = self.get_match_result(cmd, "log_level", log_level) 853 854 assert result == 0, "log level para {} failed".format(log_level) 855 856 @pytest.mark.parametrize('build_only_gn', ['True', 'False']) 857 def test_build_only_gn(self, build_only_gn): 858 """ 859 test build_only_gn parameter 860 """ 861 cmd = self.CMD.format('--build-only-gn', build_only_gn).split() 862 863 result = self.get_match_result(cmd, "build_only_gn", build_only_gn) 864 865 assert result == 0, "build only gn para {} failed".format(build_only_gn) 866 867 @pytest.mark.parametrize('test', ['', 'notest xxx', 'xts xxx', 'xxx xts']) 868 def test_test(self, test): 869 """ 870 test test parameter 871 """ 872 cmd = self.CMD.format('--test', test).split() 873 874 result = self.get_match_result(cmd, "test", test) 875 876 assert result == 0, "test para {} failed".format(test) 877 878 @pytest.mark.parametrize('gn_args', ['', 'is_debug=true']) 879 def test_gn_args(self, gn_args): 880 """ 881 test gn_args parameter 882 """ 883 cmd = self.CMD.format('--gn-args', gn_args).split() 884 885 result = self.get_match_result(cmd, "gn_args", gn_args) 886 887 assert result == 0, "gn args para {} failed".format(gn_args) 888 889 @pytest.mark.parametrize('fast_rebuild', ['True', 'False', '']) 890 def test_fast_rebuild(self, fast_rebuild): 891 """ 892 test fast_rebuild parameter 893 """ 894 cmd = self.CMD.format('--fast-rebuild', fast_rebuild).split() 895 896 result = self.get_match_result(cmd, "fast_rebuild", fast_rebuild) 897 898 assert result == 0, "fast rebuild para {} failed".format(fast_rebuild) 899 900 @pytest.mark.parametrize('going_option', ['True', 'False']) 901 def test_keep_ninja_going(self, going_option): 902 """ 903 test keep_ninja_going parameter 904 """ 905 cmd = self.CMD.format('--keep-ninja-going', going_option).split() 906 907 result = self.get_match_result(cmd, "keep_ninja_going", going_option) 908 909 assert result == 0, "keep_ninja_going para {} failed".format(going_option) 910 911 @pytest.mark.parametrize('variant_option', ['user', 'root']) 912 def test_build_variant(self, variant_option): 913 """ 914 test build_variant parameter 915 """ 916 cmd = self.CMD.format('--build-variant', variant_option).split() 917 918 resolve_result, result, _ = self.get_common_spec_result(variant_option, cmd) 919 if result != 0: 920 assert result == 0, "build_variant para {} failed".format(variant_option) 921 else: 922 root_dir = resolve_result["root_dir"]["flag"][0] 923 gn_dir = resolve_result["gn_dir"]["flag"][0] 924 925 ohos_para_path = "packages/phone/system/etc/param/ohos.para" 926 if os.path.exists(os.path.join(root_dir, gn_dir, ohos_para_path)): 927 check_file_li = [ohos_para_path] 928 check_file_flag = self.check_file_res(resolve_result, check_file_li) 929 assert result == 0 and check_file_flag, "build_variant para {} failed".format(variant_option) 930 else: 931 assert result == 0, "build_variant para {} failed".format(variant_option) 932 933 @pytest.mark.parametrize('device_option', ['default', 'unkown']) 934 def test_device_type(self, device_option): 935 """ 936 test device_type parameter 937 """ 938 cmd = self.CMD.format('--device-type', device_option).split() 939 940 resolve_result, result, _ = self.get_common_spec_result(device_option, cmd) 941 if result != 0: 942 if device_option == "unkown": 943 assert result == 1, "device_type para {} failed".format(device_option) 944 else: 945 assert result == 0, "device_type para {} failed".format(device_option) 946 947 else: 948 if device_option == "default": 949 assert result == 0, "device_type para {} failed".format(device_option) 950 else: 951 check_file_li = ["packages/phone/system/etc/param/ohos.para"] 952 check_file_flag = self.check_file_res(resolve_result, check_file_li) 953 assert result == 0 and check_file_flag, "device_type para {} failed".format(device_option) 954 955 @pytest.mark.parametrize('archive_option', ['True', 'False']) 956 def test_archive_image(self, archive_option): 957 """ 958 test archive_image parameter 959 """ 960 cmd = self.CMD.format('--archive-image', archive_option).split() 961 962 resolve_result, result, cmd_res = self.get_common_spec_result(archive_option, cmd) 963 if result != 0: 964 assert result == 0, "archive_image para {} failed".format(archive_option) 965 else: 966 root_dir = resolve_result["root_dir"]["flag"][0] 967 gn_dir = resolve_result["gn_dir"]["flag"][0] 968 image_path = os.path.join("packages", "phone", "images") 969 if archive_option.lower() == "true": 970 if os.path.exists(os.path.join(root_dir, gn_dir, image_path)): 971 check_file_li = ["images.tar.gz"] 972 check_file_flag = self.check_file_res(resolve_result, check_file_li) 973 assert result == 0 and check_file_flag, "archive_image para {} failed".format( 974 archive_option) 975 else: 976 archive_flags = {"archive_image": {"pattern": r'"--archive-image" option not work', "flag": False}} 977 archive_resolve_result = self.resolve_res(cmd_res, archive_flags) 978 archive_result = self.check_flags(archive_resolve_result) 979 assert result == 0 and archive_result == 0, "archive_image para {} failed".format(archive_option) 980 else: 981 assert result == 0, "archive_image para {} failed".format(archive_option) 982 983 @pytest.mark.parametrize('rom_option', ['True', 'False']) 984 def test_rom_size_statistics(self, rom_option): 985 """ 986 test rom_size_statistics parameter 987 """ 988 cmd = self.CMD.format('--rom-size-statistics', rom_option).split() 989 990 resolve_result, result, _ = self.get_common_spec_result(rom_option, cmd, ptyflag=True) 991 if result != 0: 992 assert result == 0, "rom_size_statistics para {} failed".format(rom_option) 993 else: 994 os_level = resolve_result["os_level"]["flag"][0] 995 log_info("os_level:{}".format(os_level)) 996 if os_level in ("mini", "small"): 997 assert result == 0, "rom_size_statistics para {} failed".format(rom_option) 998 else: 999 check_file_li = ["rom_statistics_table.json"] 1000 check_file_flag = self.check_file_res(resolve_result, check_file_li) 1001 if rom_option.lower() == "false": 1002 assert result == 0 and not check_file_flag, "rom_option para {} failed".format( 1003 rom_option) 1004 else: 1005 assert result == 0 and check_file_flag, "rom_option para {} failed".format(rom_option) 1006 1007 @pytest.mark.parametrize('ccache_option', ['True', 'False']) 1008 def test_stat_ccache(self, ccache_option): 1009 """ 1010 test stat_ccache parameter 1011 """ 1012 cmd = self.CMD.format('--stat-ccache', ccache_option).split() 1013 1014 result = self.get_match_result(cmd, "stat_ccache", ccache_option) 1015 1016 assert result == 0, "stat_ccache para {} failed".format(ccache_option) 1017 1018 @pytest.mark.parametrize('warning_option', ['True', 'False']) 1019 def test_get_warning_list(self, warning_option): 1020 """ 1021 test get_warning_list parameter 1022 """ 1023 cmd = self.CMD.format('--get-warning-list', warning_option).split() 1024 resolve_result, result, _ = self.get_common_spec_result(warning_option, cmd) 1025 if result != 0: 1026 assert result == 0, "get_warning_list para {} failed".format(warning_option) 1027 else: 1028 check_file_li = ["packages/WarningList.txt"] 1029 check_file_flag = self.check_file_res(resolve_result, check_file_li) 1030 if warning_option.lower() == "false": 1031 assert result == 0 and not check_file_flag, "get_warning_list para {} failed".format( 1032 warning_option) 1033 else: 1034 assert result == 0 and check_file_flag, "get_warning_list para {} failed".format(warning_option) 1035 1036 @pytest.mark.parametrize('ninja_option', ["True", "False", "true", "false"]) 1037 def test_generate_ninja_trace(self, ninja_option): 1038 """ 1039 test generate_ninja_trace parameter 1040 """ 1041 cmd = self.CMD.format('--generate-ninja-trace', ninja_option).split() 1042 resolve_result, result, _ = self.get_common_spec_result(ninja_option, cmd) 1043 if result != 0: 1044 assert result == 0, "generate_ninja_trace para {} failed".format(ninja_option) 1045 else: 1046 check_file_li = ["build.trace.gz", "sorted_action_duration.txt"] 1047 check_file_flag = self.check_file_res(resolve_result, check_file_li) 1048 if ninja_option.lower() == "false": 1049 assert result == 0 and not check_file_flag, "generate_ninja_trace para {} failed".format( 1050 ninja_option) 1051 else: 1052 assert result == 0 and check_file_flag, "generate_ninja_trace para {} failed".format( 1053 ninja_option) 1054 1055 @pytest.mark.parametrize('overlap_option', ['True', 'False']) 1056 def test_compute_overlap_rate(self, overlap_option): 1057 """ 1058 test compute_overlap_rate parameter 1059 """ 1060 cmd = self.CMD.format('--compute-overlap-rate', overlap_option).split() 1061 result = self.get_match_result(cmd, "compute_overlap_rate", overlap_option) 1062 1063 assert result == 0, "compute_overlap_rate para {} failed".format(overlap_option) 1064 1065 @pytest.mark.parametrize('clean_option', ['True', 'False']) 1066 def test_clean_args(self, clean_option): 1067 """ 1068 test clean-args parameter 1069 """ 1070 cmd = self.CMD.format('--clean-args', clean_option).split() 1071 resolve_result, result, _ = self.get_common_spec_result(clean_option, cmd) 1072 if result != 0: 1073 assert result == 0, "clean_args para {} failed".format(clean_option) 1074 else: 1075 root_dir = resolve_result["root_dir"]["flag"][0] 1076 json_path = os.path.join(root_dir, "out", "hb_args") 1077 json_file_li = [file for file in os.listdir(json_path) if os.path.splitext(file)[-1] == ".json"] 1078 log_info("test_clean_args, json_file_li:{}".format(json_file_li)) 1079 if clean_option.lower() == "false": 1080 exist_flag = bool(json_file_li) 1081 else: 1082 exist_flag = not json_file_li 1083 1084 assert result == 0 and exist_flag, "clean_args para {} failed".format(clean_option) 1085 1086 @pytest.mark.parametrize('deps_guard_option', ['True', 'False']) 1087 def test_deps_guard(self, deps_guard_option): 1088 """ 1089 test deps-guard parameter 1090 """ 1091 cmd = self.CMD.format('--deps-guard', deps_guard_option).split() 1092 resolve_result, result, cmd_res = self.get_common_spec_result(deps_guard_option, cmd, 1093 para_type="deps_guard") 1094 if result != 0: 1095 assert result == 0, "deps_guard para {}failed.".format(deps_guard_option) 1096 else: 1097 os_level = resolve_result["os_level"]["flag"][0] 1098 log_info("test_deps_guard,os_level:{}".format(os_level)) 1099 if deps_guard_option.lower() == "false" and os_level == "standard": 1100 standard_flags = {"Scanning": {"pattern": r"Scanning.*ELF files now", "flag": False}, 1101 "rules": {"pattern": r"All rules passed", "flag": False}} 1102 standard_resolve_result = self.resolve_res(cmd_res, standard_flags) 1103 log_info("continue match Scanning and rules ...") 1104 standard_result = self.check_flags(standard_resolve_result) 1105 assert result == 0 and standard_result == 0, "deps_guard para {},os_level {} failed.".format( 1106 deps_guard_option, os_level) 1107 else: 1108 assert result == 0, "deps_guard para {},os_level {} failed.".format(deps_guard_option, os_level) 1109 1110 @pytest.mark.parametrize('partlist_option', ['True', 'False']) 1111 def test_skip_partlist_check(self, partlist_option): 1112 """ 1113 test skip-partlist-check parameter 1114 """ 1115 cmd = self.CMD.format('--skip-partlist-check', partlist_option).split() 1116 result = self.get_match_result(cmd, "skip_partlist_check", partlist_option) 1117 assert result == 0, "skip_partlist_check para {} failed".format(partlist_option) 1118 1119 @pytest.mark.parametrize('enable_pycache', ['True', 'true', 'False', 'false']) 1120 def test_enable_pycache(self, enable_pycache): 1121 """ 1122 test enable_pycache parameter 1123 """ 1124 cmd = self.CMD.format('--enable-pycache', enable_pycache).split() 1125 1126 pycache_dir = os.environ.get('CCACHE_BASE') 1127 if not pycache_dir: 1128 pycache_dir = os.environ.get('HOME') 1129 pycache_config = os.path.join(pycache_dir, '.pycache', '.config') 1130 resolve_result, result, _ = self.get_common_spec_result(enable_pycache, cmd, 1131 para_type="enable_pycache", ptyflag=True) 1132 if result != 0: 1133 assert result == 0, "enable pycache para {} failed".format(enable_pycache) 1134 else: 1135 check_file_li = [pycache_config] 1136 check_file_flag = self.check_file_res(resolve_result, check_file_li, is_real_path=True) 1137 1138 if enable_pycache.lower() == "true": 1139 assert result == 0 and check_file_flag, "enable pycache para {} failed".format(enable_pycache) 1140 else: 1141 assert result == 0 and not check_file_flag, "enable pycache para {} failed".format(enable_pycache) 1142 1143