// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package gen_tasks_logic

/*
	Generate the tasks.json file.
*/

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"go.skia.org/infra/go/cas/rbe"
	"go.skia.org/infra/go/cipd"
	"go.skia.org/infra/task_scheduler/go/specs"
)

const (
	CAS_BAZEL         = "bazel"
	CAS_CANVASKIT     = "canvaskit"
	CAS_COMPILE       = "compile"
	CAS_EMPTY         = "empty" // TODO(borenet): It'd be nice if this wasn't necessary.
	CAS_LOTTIE_CI     = "lottie-ci"
	CAS_LOTTIE_WEB    = "lottie-web"
	CAS_PATHKIT       = "pathkit"
	CAS_PERF          = "perf"
	CAS_PUPPETEER     = "puppeteer"
	CAS_RUN_RECIPE    = "run-recipe"
	CAS_RECIPES       = "recipes"
	CAS_RECREATE_SKPS = "recreate-skps"
	CAS_SKOTTIE_WASM  = "skottie-wasm"
	CAS_SKPBENCH      = "skpbench"
	CAS_TASK_DRIVERS  = "task-drivers"
	CAS_TEST          = "test"
	CAS_WASM_GM       = "wasm-gm"
	CAS_WHOLE_REPO    = "whole-repo"

	BUILD_TASK_DRIVERS_PREFIX  = "Housekeeper-PerCommit-BuildTaskDrivers"
	BUNDLE_RECIPES_NAME        = "Housekeeper-PerCommit-BundleRecipes"
	ISOLATE_GCLOUD_LINUX_NAME  = "Housekeeper-PerCommit-IsolateGCloudLinux"
	ISOLATE_SKIMAGE_NAME       = "Housekeeper-PerCommit-IsolateSkImage"
	ISOLATE_SKP_NAME           = "Housekeeper-PerCommit-IsolateSKP"
	ISOLATE_MSKP_NAME          = "Housekeeper-PerCommit-IsolateMSKP"
	ISOLATE_SVG_NAME           = "Housekeeper-PerCommit-IsolateSVG"
	ISOLATE_NDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidNDKLinux"
	ISOLATE_SDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidSDKLinux"
	ISOLATE_WIN_TOOLCHAIN_NAME = "Housekeeper-PerCommit-IsolateWinToolchain"

	DEBIAN_11_OS                   = "Debian-11.5"
	DEFAULT_OS_DEBIAN              = "Debian-10.10"
	DEFAULT_OS_LINUX_GCE           = "Debian-10.3"
	OLD_OS_LINUX_GCE               = "Debian-9.8"
	COMPILE_TASK_NAME_OS_LINUX     = "Debian10"
	COMPILE_TASK_NAME_OS_LINUX_OLD = "Debian9"
	DEFAULT_OS_MAC                 = "Mac-10.15.7"
	DEFAULT_OS_WIN                 = "Windows-Server-17763"

	// Small is a 2-core machine.
	// TODO(dogben): Would n1-standard-1 or n1-standard-2 be sufficient?
	MACHINE_TYPE_SMALL = "n1-highmem-2"
	// Medium is a 16-core machine.
	MACHINE_TYPE_MEDIUM = "n1-standard-16"
	// Large is a 64-core machine. (We use "highcpu" because we don't need more than 57GB memory for
	// any of our tasks.)
	MACHINE_TYPE_LARGE = "n1-highcpu-64"

	// Swarming output dirs.
	OUTPUT_NONE          = "output_ignored" // This will result in outputs not being isolated.
	OUTPUT_BUILD         = "build"
	OUTPUT_BUILD_NOPATCH = "build_nopatch"
	OUTPUT_TEST          = "test"
	OUTPUT_PERF          = "perf"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"

	// This will have to be kept in sync with the kMin_Version in
	// src/core/SkPicturePriv.h.
	// See the comment in that file on how to find the version to use here.
	oldestSupportedSkpVersion = 293
)

var (
	// "Constants"

	// Named caches used by tasks.
	CACHES_GIT = []*specs.Cache{
		{
			Name: "git",
			Path: "cache/git",
		},
		{
			Name: "git_cache",
			Path: "cache/git_cache",
		},
	}
	CACHES_GO = []*specs.Cache{
		{
			Name: "go_cache",
			Path: "cache/go_cache",
		},
		{
			Name: "gopath",
			Path: "cache/gopath",
		},
	}
	CACHES_WORKDIR = []*specs.Cache{
		{
			Name: "work",
			Path: "cache/work",
		},
	}
	CACHES_CCACHE = []*specs.Cache{
		{
			Name: "ccache",
			Path: "cache/ccache",
		},
	}
	// The "docker" cache is used as a persistent working directory for
	// tasks which use Docker. It is not to be confused with Docker's own
	// cache, which stores images. We do not currently use a named Swarming
	// cache for the latter.
	// TODO(borenet): We should ensure that any task which uses Docker does
	// not also use the normal "work" cache, to prevent issues like
	// https://bugs.chromium.org/p/skia/issues/detail?id=9749.
	CACHES_DOCKER = []*specs.Cache{
		{
			Name: "docker",
			Path: "cache/docker",
		},
	}

	// CAS_SPEC_LOTTIE_CI is a CasSpec which includes the files needed for
	// lottie-ci. This is global so that it can be overridden by other
	// repositories which import this file.
	CAS_SPEC_LOTTIE_CI = &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/run_recipe.py",
			"skia/infra/lottiecap",
			"skia/tools/lottie-web-perf",
			"skia/tools/lottiecap",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	}

	// CAS_SPEC_WHOLE_REPO is a CasSpec which includes the entire repo. This is
	// global so that it can be overridden by other repositories which import
	// this file.
	CAS_SPEC_WHOLE_REPO = &specs.CasSpec{
		Root:     "..",
		Paths:    []string{"skia"},
		Excludes: []string{rbe.ExcludeGitDir},
	}

	// TODO(borenet): This is hacky and bad.
	CIPD_PKG_LUCI_AUTH = cipd.MustGetPackage("infra/tools/luci-auth/${platform}")

	CIPD_PKGS_GOLDCTL = cipd.MustGetPackage("skia/tools/goldctl/${platform}")

	CIPD_PKGS_XCODE = []*specs.CipdPackage{
		// https://chromium.googlesource.com/chromium/tools/build/+/e19b7d9390e2bb438b566515b141ed2b9ed2c7c2/scripts/slave/recipe_modules/ios/api.py#317
		// This package is really just an installer for XCode.
		{
			Name: "infra/tools/mac_toolchain/${platform}",
			Path: "mac_toolchain",
			// When this is updated, also update
			// https://skia.googlesource.com/skcms.git/+/f1e2b45d18facbae2dece3aca673fe1603077846/infra/bots/gen_tasks.go#56
			Version: "git_revision:796d2b92cff93fc2059623ce0a66284373ceea0a",
		},
	}

	// These properties are required by some tasks, e.g. for running
	// bot_update, but they prevent de-duplication, so they should only be
	// used where necessary.
	EXTRA_PROPS = map[string]string{
		"buildbucket_build_id": specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID,
		"patch_issue":          specs.PLACEHOLDER_ISSUE_INT,
		"patch_ref":            specs.PLACEHOLDER_PATCH_REF,
		"patch_repo":           specs.PLACEHOLDER_PATCH_REPO,
		"patch_set":            specs.PLACEHOLDER_PATCHSET_INT,
		"patch_storage":        specs.PLACEHOLDER_PATCH_STORAGE,
		"repository":           specs.PLACEHOLDER_REPO,
		"revision":             specs.PLACEHOLDER_REVISION,
		"task_id":              specs.PLACEHOLDER_TASK_ID,
	}

	// ISOLATE_ASSET_MAPPING maps the name of an asset to the configuration
	// for how the CIPD package should be installed for a given task.
	ISOLATE_ASSET_MAPPING = map[string]uploadAssetCASCfg{
		"gcloud_linux": {
			uploadTaskName: ISOLATE_GCLOUD_LINUX_NAME,
			path:           "gcloud_linux",
		},
		"skimage": {
			uploadTaskName: ISOLATE_SKIMAGE_NAME,
			path:           "skimage",
		},
		"skp": {
			uploadTaskName: ISOLATE_SKP_NAME,
			path:           "skp",
		},
		"svg": {
			uploadTaskName: ISOLATE_SVG_NAME,
			path:           "svg",
		},
		"mskp": {
			uploadTaskName: ISOLATE_MSKP_NAME,
			path:           "mskp",
		},
		"android_ndk_linux": {
			uploadTaskName: ISOLATE_NDK_LINUX_NAME,
			path:           "android_ndk_linux",
		},
		"android_sdk_linux": {
			uploadTaskName: ISOLATE_SDK_LINUX_NAME,
			path:           "android_sdk_linux",
		},
		"win_toolchain": {
			alwaysIsolate:  true,
			uploadTaskName: ISOLATE_WIN_TOOLCHAIN_NAME,
			path:           "win_toolchain",
		},
	}

	// Set the dontReduceOpsTaskSplitting option on these models.
	DONT_REDUCE_OPS_TASK_SPLITTING_MODELS = []string{
		"NUC5PPYH",
	}
)

// Config contains general configuration information.
type Config struct {
	// Directory containing assets. Assumed to be relative to the directory
	// which contains the calling gen_tasks.go file. If not specified, uses
	// the infra/bots/assets from this repo.
	AssetsDir string `json:"assets_dir"`

	// Path to the builder name schema JSON file. Assumed to be relative to
	// the directory which contains the calling gen_tasks.go file. If not
	// specified, uses infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json
	// from this repo.
	BuilderNameSchemaFile string `json:"builder_name_schema"`

	// URL of the Skia Gold known hashes endpoint.
	GoldHashesURL string `json:"gold_hashes_url"`

	// GCS bucket used for GM results.
	GsBucketGm string `json:"gs_bucket_gm"`

	// GCS bucket used for Nanobench results.
	GsBucketNano string `json:"gs_bucket_nano"`

	// Optional function which returns a bot ID for internal devices.
	InternalHardwareLabel func(parts map[string]string) *int `json:"-"`

	// List of task names for which we'll never upload results.
	NoUpload []string `json:"no_upload"`

	// PathToSkia is the relative path from the root of the current checkout to
	// the root of the Skia checkout.
	PathToSkia string `json:"path_to_skia"`

	// Swarming pool used for triggering tasks.
	Pool string `json:"pool"`

	// LUCI project associated with this repo.
	Project string `json:"project"`

	// Service accounts.
	ServiceAccountCanary       string `json:"service_account_canary"`
	ServiceAccountCompile      string `json:"service_account_compile"`
	ServiceAccountHousekeeper  string `json:"service_account_housekeeper"`
	ServiceAccountRecreateSKPs string `json:"service_account_recreate_skps"`
	ServiceAccountUploadBinary string `json:"service_account_upload_binary"`
	ServiceAccountUploadGM     string `json:"service_account_upload_gm"`
	ServiceAccountUploadNano   string `json:"service_account_upload_nano"`

	// Optional override function which derives Swarming bot dimensions
	// from parts of task names.
	SwarmDimensions func(parts map[string]string) []string `json:"-"`
}

// JobInfo is the type of each entry in the jobs.json file.
type JobInfo struct {
	// The name of the job.
	Name string `json:"name"`

	// The optional CQ config of this job. If the CQ config is missing then the
	// job will not be added to the CQ of this branch.
	CQConfig *specs.CommitQueueJobConfig `json:"cq_config,omitempty"`
}

// LoadConfig loads the Config from a cfg.json file which is the sibling of the
// calling gen_tasks.go file.
func LoadConfig() *Config {
	cfgDir := getCallingDirName()
	var cfg Config
	LoadJson(filepath.Join(cfgDir, "cfg.json"), &cfg)
	return &cfg
}

// CheckoutRoot is a wrapper around specs.GetCheckoutRoot which prevents the
// caller from needing a dependency on the specs package.
func CheckoutRoot() string {
	root, err := specs.GetCheckoutRoot()
	if err != nil {
		log.Fatal(err)
	}
	return root
}

// LoadJson loads JSON from the given file and unmarshals it into the given
// destination.
func LoadJson(filename string, dest interface{}) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatalf("Unable to read %q: %s", filename, err)
	}
	if err := json.Unmarshal(b, dest); err != nil {
		log.Fatalf("Unable to parse %q: %s", filename, err)
	}
}

// In returns true if |s| is in the slice |a|.
// TODO(borenet): This is copied from go.skia.org/infra/go/util to avoid the
// huge set of additional dependencies added by that package.
func In(s string, a []string) bool {
	for _, x := range a {
		if x == s {
			return true
		}
	}
	return false
}

// GenTasks regenerates the tasks.json file. Loads the job list from a jobs.json
// file which is the sibling of the calling gen_tasks.go file. If cfg is nil, it
// is similarly loaded from a cfg.json file which is the sibling of the calling
// gen_tasks.go file.
func GenTasks(cfg *Config) {
	b := specs.MustNewTasksCfgBuilder()

	// Find the paths to the infra/bots directories in this repo and the
	// repo of the calling file.
	relpathTargetDir := getThisDirName()
	relpathBaseDir := getCallingDirName()

	// Parse jobs.json.
	var jobsWithInfo []*JobInfo
	LoadJson(filepath.Join(relpathBaseDir, "jobs.json"), &jobsWithInfo)
	// Create a slice with only job names.
	jobs := []string{}
	for _, j := range jobsWithInfo {
		jobs = append(jobs, j.Name)
	}

	if cfg == nil {
		cfg = new(Config)
		LoadJson(filepath.Join(relpathBaseDir, "cfg.json"), cfg)
	}

	// Create the JobNameSchema.
	builderNameSchemaFile := filepath.Join(relpathTargetDir, "recipe_modules", "builder_name_schema", "builder_name_schema.json")
	if cfg.BuilderNameSchemaFile != "" {
		builderNameSchemaFile = filepath.Join(relpathBaseDir, cfg.BuilderNameSchemaFile)
	}
	schema, err := NewJobNameSchema(builderNameSchemaFile)
	if err != nil {
		log.Fatal(err)
	}

	// Set the assets dir.
	assetsDir := filepath.Join(relpathTargetDir, "assets")
	if cfg.AssetsDir != "" {
		assetsDir = filepath.Join(relpathBaseDir, cfg.AssetsDir)
	}
	b.SetAssetsDir(assetsDir)

	// Create Tasks and Jobs.
	builder := &builder{
		TasksCfgBuilder: b,
		cfg:             cfg,
		jobNameSchema:   schema,
		jobs:            jobs,
	}
	for _, j := range jobsWithInfo {
		jb := newJobBuilder(builder, j.Name)
		jb.genTasksForJob()
		jb.finish()

		// Add the CQ spec if it is a CQ job.
		if j.CQConfig != nil {
			b.MustAddCQJob(j.Name, j.CQConfig)
		}
	}

	// Create CasSpecs.
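	// A note on layout (inferred from the specs below, not from specs package
	// documentation): a Root of ".." points at the parent of the checkout, so
	// Paths are prefixed with "skia/"; specs rooted at "../skia" drop that
	// prefix so they can also be used from other repos.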
	b.MustAddCasSpec(CAS_BAZEL, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			// source code
			"skia/example",
			"skia/experimental/bazel_test",
			"skia/include",
			"skia/modules",
			"skia/src",
			"skia/tests",
			"skia/third_party",
			"skia/tools",
			// needed for tests
			"skia/gn", // some Python scripts still live here
			"skia/resources",
			"skia/package.json",
			"skia/package-lock.json",
			"skia/DEPS", // needed to check generation
			// Needed to run bazel
			"skia/.bazelrc",
			"skia/.bazelversion",
			"skia/BUILD.bazel",
			"skia/WORKSPACE.bazel",
			"skia/bazel",
			"skia/defines.bzl",
			"skia/go_repositories.bzl",
			"skia/requirements.txt",
			"skia/toolchain",
		},
		Excludes: []string{
			rbe.ExcludeGitDir,
			"skia/third_party/externals",
		},
	})
	b.MustAddCasSpec(CAS_CANVASKIT, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/run_recipe.py",
			"skia/infra/canvaskit",
			"skia/modules/canvaskit",
			"skia/modules/pathkit/perf/perfReporter.js",
			"skia/modules/pathkit/tests/testReporter.js",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_EMPTY, specs.EmptyCasSpec)
	b.MustAddCasSpec(CAS_LOTTIE_CI, CAS_SPEC_LOTTIE_CI)
	b.MustAddCasSpec(CAS_LOTTIE_WEB, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/run_recipe.py",
			"skia/tools/lottie-web-perf",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_PATHKIT, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/run_recipe.py",
			"skia/infra/pathkit",
			"skia/modules/pathkit",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_PERF, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/assets",
			"skia/infra/bots/run_recipe.py",
			"skia/platform_tools/ios/bin",
			"skia/resources",
			"skia/tools/valgrind.supp",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_PUPPETEER, &specs.CasSpec{
		Root: "../skia", // Needed for other repos.
		Paths: []string{
			".vpython",
			"tools/perf-canvaskit-puppeteer",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_RECIPES, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/infra/config/recipes.cfg",
			"skia/infra/bots/bundle_recipes.sh",
			"skia/infra/bots/README.recipes.md",
			"skia/infra/bots/recipe_modules",
			"skia/infra/bots/recipes",
			"skia/infra/bots/recipes.py",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_RUN_RECIPE, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/run_recipe.py",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_SKOTTIE_WASM, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/run_recipe.py",
			"skia/tools/skottie-wasm-perf",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_SKPBENCH, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/assets",
			"skia/infra/bots/run_recipe.py",
			"skia/tools/skpbench",
			"skia/tools/valgrind.supp",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_TASK_DRIVERS, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			// Deps needed to use Bazel
			"skia/.bazelrc",
			"skia/.bazelversion",
			"skia/BUILD.bazel",
			"skia/WORKSPACE.bazel",
			"skia/bazel",
			"skia/go_repositories.bzl",
			"skia/include/config", // There's a WORKSPACE.bazel in here
			"skia/requirements.txt",
			"skia/toolchain",
			// Actually needed to build the task drivers
			"skia/infra/bots/BUILD.bazel",
			"skia/infra/bots/build_task_drivers.sh",
			"skia/infra/bots/task_drivers",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_TEST, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/infra/bots/assets",
			"skia/infra/bots/run_recipe.py",
			"skia/platform_tools/ios/bin",
			"skia/resources",
			"skia/tools/valgrind.supp",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_WASM_GM, &specs.CasSpec{
		Root: "../skia", // Needed for other repos.
		Paths: []string{
			".vpython",
			"resources",
			"tools/run-wasm-gm-tests",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	b.MustAddCasSpec(CAS_WHOLE_REPO, CAS_SPEC_WHOLE_REPO)
	b.MustAddCasSpec(CAS_RECREATE_SKPS, &specs.CasSpec{
		Root: "..",
		Paths: []string{
			"skia/.vpython",
			"skia/DEPS",
			"skia/bin/fetch-sk",
			"skia/infra/bots/assets/skp",
			"skia/infra/bots/utils.py",
			"skia/infra/config/recipes.cfg",
			"skia/tools/skp",
		},
		Excludes: []string{rbe.ExcludeGitDir},
	})
	generateCompileCAS(b, cfg)

	builder.MustFinish()
}

// getThisDirName returns the infra/bots directory which is an ancestor of this
// file.
func getThisDirName() string {
	_, thisFileName, _, ok := runtime.Caller(0)
	if !ok {
		log.Fatal("Unable to find path to current file.")
	}
	return filepath.Dir(filepath.Dir(thisFileName))
}

// getCallingDirName returns the infra/bots directory which is an ancestor of
// the calling gen_tasks.go file. WARNING: assumes that the calling gen_tasks.go
// file appears two steps up the stack; do not call from a function which is not
// directly called by gen_tasks.go.
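// For example (illustrative): when a repo's infra/bots/gen_tasks.go calls
// GenTasks or LoadConfig directly, runtime.Caller(2) below skips this function
// and its caller, landing in that gen_tasks.go file, whose directory is returned.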
func getCallingDirName() string {
	_, callingFileName, _, ok := runtime.Caller(2)
	if !ok {
		log.Fatal("Unable to find path to calling file.")
	}
	return filepath.Dir(callingFileName)
}

// builder is a wrapper for specs.TasksCfgBuilder.
type builder struct {
	*specs.TasksCfgBuilder
	cfg           *Config
	jobNameSchema *JobNameSchema
	jobs          []string
}

// marshalJson encodes the given data as JSON and fixes escaping of '<' which Go
// does by default.
func marshalJson(data interface{}) string {
	j, err := json.Marshal(data)
	if err != nil {
		log.Fatal(err)
	}
	return strings.Replace(string(j), "\\u003c", "<", -1)
}

// kitchenTaskNoBundle sets up the task to run a recipe via Kitchen, without the
// recipe bundle.
func (b *taskBuilder) kitchenTaskNoBundle(recipe string, outputDir string) {
	b.cipd(CIPD_PKG_LUCI_AUTH)
	b.cipd(cipd.MustGetPackage("infra/tools/luci/kitchen/${platform}"))
	b.env("RECIPES_USE_PY3", "true")
	b.envPrefixes("VPYTHON_DEFAULT_SPEC", "skia/.vpython")
	b.usesPython()
	b.recipeProp("swarm_out_dir", outputDir)
	if outputDir != OUTPUT_NONE {
		b.output(outputDir)
	}
	const python = "cipd_bin_packages/vpython3${EXECUTABLE_SUFFIX}"
	b.cmd(python, "-u", "skia/infra/bots/run_recipe.py", "${ISOLATED_OUTDIR}", recipe, b.getRecipeProps(), b.cfg.Project)
	// Most recipes want this isolate; they can override if necessary.
	b.cas(CAS_RUN_RECIPE)
	b.timeout(time.Hour)
	b.addToPATH("cipd_bin_packages", "cipd_bin_packages/bin")
	b.Spec.ExtraTags = map[string]string{
		"log_location": fmt.Sprintf("logdog://logs.chromium.org/%s/${SWARMING_TASK_ID}/+/annotations", b.cfg.Project),
	}

	// Attempts.
	if !b.role("Build", "Upload") && b.extraConfig("ASAN", "HWASAN", "MSAN", "TSAN", "Valgrind") {
		// Sanitizers often find non-deterministic issues that retries would hide.
		b.attempts(1)
	} else {
		// Retry by default to hide random bot/hardware failures.
		b.attempts(2)
	}
}

// kitchenTask sets up the task to run a recipe via Kitchen.
func (b *taskBuilder) kitchenTask(recipe string, outputDir string) {
	b.kitchenTaskNoBundle(recipe, outputDir)
	b.dep(b.bundleRecipes())
}

// internalHardwareLabel returns the internal ID for the bot, if any.
func (b *taskBuilder) internalHardwareLabel() *int {
	if b.cfg.InternalHardwareLabel != nil {
		return b.cfg.InternalHardwareLabel(b.parts)
	}
	return nil
}

// linuxGceDimensions adds the Swarming bot dimensions for Linux GCE instances.
func (b *taskBuilder) linuxGceDimensions(machineType string) {
	b.dimension(
		// Specify CPU to avoid running builds on bots with a more unique CPU.
		"cpu:x86-64-Haswell_GCE",
		"gpu:none",
		// Currently all Linux GCE tasks run on 16-CPU machines.
		fmt.Sprintf("machine_type:%s", machineType),
		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
		fmt.Sprintf("pool:%s", b.cfg.Pool),
	)
}

// codesizeTaskNameRegexp captures the "CodeSize-<binary name>-" prefix of a CodeSize task name.
var codesizeTaskNameRegexp = regexp.MustCompile("^CodeSize-[a-zA-Z0-9_]+-")

// deriveCompileTaskName returns the name of a compile task based on the given
// job name.
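// For example (illustrative, not an exact job name): a "Test" job on Android with
// compiler Clang, arch arm64, and configuration Release maps to a compile task
// like "Build-Debian10-Clang-arm64-Release-Android", because Android test jobs are
// cross-compiled on Linux and "Android" is prepended to the extra_config.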
func (b *jobBuilder) deriveCompileTaskName() string {
	if b.role("Test", "Perf", "FM") {
		task_os := b.parts["os"]
		ec := []string{}
		if val := b.parts["extra_config"]; val != "" {
			ec = strings.Split(val, "_")
			ignore := []string{
				"Skpbench", "AbandonGpuContext", "PreAbandonGpuContext", "Valgrind", "FailFlushTimeCallbacks",
				"ReleaseAndAbandonGpuContext", "FSAA", "FAAA", "FDAA", "NativeFonts", "GDI",
				"NoGPUThreads", "DDL1", "DDL3", "T8888",
				"DDLTotal", "DDLRecord", "9x9", "BonusConfigs", "ColorSpaces", "GL", "SkottieTracing", "SkottieWASM",
				"GpuTess", "DMSAAStats", "Mskp", "Docker", "PDF", "SkVM", "Puppeteer",
				"SkottieFrames", "RenderSKP", "CanvasPerf", "AllPathsVolatile", "WebGL2", "i5",
				"OldestSupportedSkpVersion"}
			keep := make([]string, 0, len(ec))
			for _, part := range ec {
				if !In(part, ignore) {
					keep = append(keep, part)
				}
			}
			ec = keep
		}
		if b.matchOs("Android") {
			if !In("Android", ec) {
				ec = append([]string{"Android"}, ec...)
			}
			task_os = COMPILE_TASK_NAME_OS_LINUX
		} else if b.os("ChromeOS") {
			ec = append([]string{"Chromebook", "GLES"}, ec...)
			task_os = COMPILE_TASK_NAME_OS_LINUX
		} else if b.os("iOS") {
			ec = append([]string{task_os}, ec...)
			task_os = "Mac"
		} else if b.matchOs("Win") {
			task_os = "Win"
		} else if b.compiler("GCC") {
			// GCC compiles are now done in a Docker container. We use the same OS and
			// version to compile as to test.
			ec = append(ec, "Docker")
		} else if b.matchOs("Debian11") {
			// We compile using the Debian11 machines in the skolo.
			task_os = "Debian11"
		} else if b.matchOs("Ubuntu", "Debian") {
			task_os = COMPILE_TASK_NAME_OS_LINUX
		} else if b.matchOs("Mac") {
			task_os = "Mac"
		}
		jobNameMap := map[string]string{
			"role":          "Build",
			"os":            task_os,
			"compiler":      b.parts["compiler"],
			"target_arch":   b.parts["arch"],
			"configuration": b.parts["configuration"],
		}
		if b.extraConfig("PathKit") {
			ec = []string{"PathKit"}
			// We prefer to compile this in the cloud because we have more resources there.
			jobNameMap["os"] = "Debian10"
		}
		if b.extraConfig("CanvasKit", "SkottieWASM", "Puppeteer") {
			if b.cpu() {
				ec = []string{"CanvasKit_CPU"}
			} else {
				ec = []string{"CanvasKit"}
			}
			// We prefer to compile this in the cloud because we have more resources there.
			jobNameMap["os"] = "Debian10"
		}
		if len(ec) > 0 {
			jobNameMap["extra_config"] = strings.Join(ec, "_")
		}
		name, err := b.jobNameSchema.MakeJobName(jobNameMap)
		if err != nil {
			log.Fatal(err)
		}
		return name
	} else if b.role("BuildStats") {
		return strings.Replace(b.Name, "BuildStats", "Build", 1)
	} else if b.role("CodeSize") {
		return codesizeTaskNameRegexp.ReplaceAllString(b.Name, "Build-")
	} else {
		return b.Name
	}
}

// swarmDimensions generates swarming bot dimensions for the given task.
func (b *taskBuilder) swarmDimensions() {
	if b.cfg.SwarmDimensions != nil {
		dims := b.cfg.SwarmDimensions(b.parts)
		if dims != nil {
			b.dimension(dims...)
			return
		}
	}
	b.defaultSwarmDimensions()
}

// defaultSwarmDimensions generates default swarming bot dimensions for the given task.
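// For example (illustrative, based on the mappings below): a Test job on a Pixel5
// running "Android" yields dimensions along the lines of
//
//	pool:<cfg.Pool> os:Android device_type:redfin device_os:RD1A.200810.022.A4
//
// while a CPU job on a GCE AVX2 machine instead gets cpu:x86-64-Haswell_GCE and a
// GCE Linux os dimension.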
func (b *taskBuilder) defaultSwarmDimensions() {
	d := map[string]string{
		"pool": b.cfg.Pool,
	}
	if os, ok := b.parts["os"]; ok {
		d["os"], ok = map[string]string{
			"Android":    "Android",
			"Android12":  "Android",
			"ChromeOS":   "ChromeOS",
			"Debian9":    DEFAULT_OS_LINUX_GCE, // Runs in Deb9 Docker.
			"Debian10":   DEFAULT_OS_LINUX_GCE,
			"Debian11":   DEBIAN_11_OS,
			"Mac":        DEFAULT_OS_MAC,
			"Mac10.12":   "Mac-10.12",
			"Mac10.13":   "Mac-10.13.6",
			"Mac10.14":   "Mac-10.14",
			"Mac10.15.1": "Mac-10.15.1",
			"Mac10.15.7": "Mac-10.15.7", // Same as 'Mac', but explicit.
			"Mac11":      "Mac-11.4",
			"Mac12":      "Mac-12",
			"Mac13":      "Mac-13",
			"Ubuntu18":   "Ubuntu-18.04",
			"Win":        DEFAULT_OS_WIN,
			"Win10":      "Windows-10-19045",
			"Win2019":    DEFAULT_OS_WIN,
			"Win8":       "Windows-8.1-SP0",
			"iOS":        "iOS-13.3.1",
		}[os]
		if !ok {
			log.Fatalf("Entry %q not found in OS mapping.", os)
		}
		if os == "Win10" && b.parts["model"] == "Golo" {
			// ChOps-owned machines have Windows 10 21h1.
			d["os"] = "Windows-10-19043"
		}
		if b.parts["model"] == "iPhone11" {
			d["os"] = "iOS-13.6"
		}
		if b.parts["model"] == "iPadPro" {
			d["os"] = "iOS-13.6"
		}
	} else {
		d["os"] = DEFAULT_OS_DEBIAN
	}
	if b.role("Test", "Perf") {
		if b.os("Android") {
			// For Android, the device type is a better dimension
			// than CPU or GPU.
			deviceInfo, ok := map[string][]string{
				"AndroidOne":      {"sprout", "MOB30Q"},
				"GalaxyS7_G930FD": {"herolte", "R16NW_G930FXXS2ERH6"}, // This is Oreo.
				"GalaxyS9":        {"starlte", "QP1A.190711.020"},     // This is Android10.
				"GalaxyS20":       {"exynos990", "QP1A.190711.020"},
				"JioNext":         {"msm8937", "RKQ1.210602.002"},
				"Nexus5":          {"hammerhead", "M4B30Z_3437181"},
				"Nexus7":          {"grouper", "LMY47V_1836172"}, // 2012 Nexus 7
				"P30":             {"HWELE", "HUAWEIELE-L29"},
				"Pixel2XL":        {"taimen", "PPR1.180610.009"},
				"Pixel3":          {"blueline", "PQ1A.190105.004"},
				"Pixel3a":         {"sargo", "QP1A.190711.020"},
				"Pixel4":          {"flame", "RPB2.200611.009"},      // R Preview
				"Pixel4a":         {"sunfish", "AOSP.MASTER_7819821"}, // Pixel4a flashed with an Android HWASan build.
				"Pixel4XL":        {"coral", "QD1A.190821.011.C4"},
				"Pixel5":          {"redfin", "RD1A.200810.022.A4"},
				"Pixel6":          {"oriole", "SD1A.210817.037"},
				"Pixel7":          {"cheetah", "TD1A.221105.002"},
				"TecnoSpark3Pro":  {"TECNO-KB8", "PPR1.180610.011"},
				"Wembley":         {"wembley", "SP2A.220505.008"},
			}[b.parts["model"]]
			if !ok {
				log.Fatalf("Entry %q not found in Android mapping.", b.parts["model"])
			}
			d["device_type"] = deviceInfo[0]
			d["device_os"] = deviceInfo[1]

			// Tests using Android's HWAddress Sanitizer require an HWASan build of Android.
			// See https://developer.android.com/ndk/guides/hwasan.
			if b.extraConfig("HWASAN") {
				d["android_hwasan_build"] = "1"
			}
		} else if b.os("Android12") {
			// For Android, the device type is a better dimension
			// than CPU or GPU.
			deviceInfo, ok := map[string][]string{
				"Pixel5": {"redfin", "SP2A.220305.012"},
			}[b.parts["model"]]
			if !ok {
				log.Fatalf("Entry %q not found in Android mapping.", b.parts["model"])
			}
			d["device_type"] = deviceInfo[0]
			d["device_os"] = deviceInfo[1]

			// Tests using Android's HWAddress Sanitizer require an HWASan build of Android.
			// See https://developer.android.com/ndk/guides/hwasan.
			if b.extraConfig("HWASAN") {
				d["android_hwasan_build"] = "1"
			}
		} else if b.os("iOS") {
			device, ok := map[string]string{
				"iPadMini4": "iPad5,1",
				"iPhone7":   "iPhone9,1",
				"iPhone8":   "iPhone10,1",
				"iPhone11":  "iPhone12,1",
				"iPadPro":   "iPad6,3",
			}[b.parts["model"]]
			if !ok {
				log.Fatalf("Entry %q not found in iOS mapping.", b.parts["model"])
			}
			d["device_type"] = device
		} else if b.cpu() || b.extraConfig("CanvasKit", "Docker", "SwiftShader") {
			modelMapping, ok := map[string]map[string]string{
				"AppleM1": {
					"MacMini9.1": "arm64-64-Apple_M1",
				},
				"AVX": {
					"VMware7.1": "x86-64",
				},
				"AVX2": {
					"GCE":            "x86-64-Haswell_GCE",
					"MacBookAir7.2":  "x86-64-i5-5350U",
					"MacBookPro11.5": "x86-64-i7-4870HQ",
					"MacMini7.1":     "x86-64-i5-4278U",
					"NUC5i7RYH":      "x86-64-i7-5557U",
					"NUC9i7QN":       "x86-64-i7-9750H",
				},
				"AVX512": {
					"GCE":  "x86-64-Skylake_GCE",
					"Golo": "Intel64_Family_6_Model_85_Stepping_7__GenuineIntel",
				},
				"Rome": {
					"GCE": "x86-64-AMD_Rome_GCE",
				},
				"SwiftShader": {
					"GCE": "x86-64-Haswell_GCE",
				},
			}[b.parts["cpu_or_gpu_value"]]
			if !ok {
				log.Fatalf("Entry %q not found in CPU mapping.", b.parts["cpu_or_gpu_value"])
			}
			cpu, ok := modelMapping[b.parts["model"]]
			if !ok {
				log.Fatalf("Entry %q not found in %q model mapping.", b.parts["model"], b.parts["cpu_or_gpu_value"])
			}
			d["cpu"] = cpu
			if b.model("GCE") && b.matchOs("Debian") {
				d["os"] = DEFAULT_OS_LINUX_GCE
			}
			if b.model("GCE") && d["cpu"] == "x86-64-Haswell_GCE" {
				d["machine_type"] = MACHINE_TYPE_MEDIUM
			}
		} else {
			// It's a GPU job.
			if b.matchOs("Win") {
				gpu, ok := map[string]string{
					// At some point this might use the device ID, but for now it's like Chromebooks.
					"GTX660":        "10de:11c0-26.21.14.4120",
					"GTX960":        "10de:1401-31.0.15.1694",
					"IntelHD4400":   "8086:0a16-20.19.15.4963",
					"IntelIris540":  "8086:1926-31.0.101.2115",
					"IntelIris6100": "8086:162b-20.19.15.4963",
					"IntelIris655":  "8086:3ea5-26.20.100.7463",
					"IntelIrisXe":   "8086:9a49-31.0.101.3222",
					"RadeonHD7770":  "1002:683d-26.20.13031.18002",
					"RadeonR9M470X": "1002:6646-26.20.13031.18002",
					"QuadroP400":    "10de:1cb3-30.0.15.1179",
					"RadeonVega6":   "1002:1636-31.0.12027.7000",
					"RTX3060":       "10de:2489-31.0.15.1694",
				}[b.parts["cpu_or_gpu_value"]]
				if !ok {
					log.Fatalf("Entry %q not found in Win GPU mapping.", b.parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
			} else if b.isLinux() {
				gpu, ok := map[string]string{
					// Intel drivers come from CIPD, so no need to specify the version here.
					"IntelBayTrail": "8086:0f31",
					"IntelHD2000":   "8086:0102",
					"IntelHD405":    "8086:22b1",
					"IntelIris640":  "8086:5926",
					"QuadroP400":    "10de:1cb3-510.60.02",
					"RTX3060":       "10de:2489-470.141.03",
					"IntelIrisXe":   "8086:9a49",
					"RadeonVega6":   "1002:1636",
				}[b.parts["cpu_or_gpu_value"]]
				if !ok {
					log.Fatalf("Entry %q not found in Ubuntu GPU mapping.", b.parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu

				if b.matchOs("Debian11") {
					d["os"] = DEBIAN_11_OS
				} else if b.matchOs("Debian") {
					// The Debian10 machines in the skolo are 10.10, not 10.3.
					d["os"] = DEFAULT_OS_DEBIAN
				}
				if b.parts["cpu_or_gpu_value"] == "IntelIrisXe" {
					// The Intel Iris Xe devices are Debian 11.3.
					d["os"] = "Debian-bookworm/sid"
				}
			} else if b.matchOs("Mac") {
				gpu, ok := map[string]string{
					"AppleM1":       "AppleM1",
					"IntelHD6000":   "8086:1626",
					"IntelHD615":    "8086:591e",
					"IntelIris5100": "8086:0a2e",
					"IntelIrisPlus": "8086:8a53",
					"RadeonHD8870M": "1002:6821-4.0.20-3.2.8",
				}[b.parts["cpu_or_gpu_value"]]
				if !ok {
					log.Fatalf("Entry %q not found in Mac GPU mapping.", b.parts["cpu_or_gpu_value"])
				}
				if gpu == "AppleM1" {
					// No GPU dimension yet, but we can constrain by CPU.
					d["cpu"] = "arm64-64-Apple_M1"
				} else {
					d["gpu"] = gpu
				}
				// We have two different types of MacMini7,1 with the same GPU but different CPUs.
				if b.gpu("IntelIris5100") {
					if b.extraConfig("i5") {
						// If we say "i5", run on our MacMini7,1s in the Skolo:
						d["cpu"] = "x86-64-i5-4278U"
					} else {
						// Otherwise, run on Golo machines, just because that's
						// where those jobs have always run. Plus, some of them
						// are Perf jobs, which we want to keep consistent.
						d["cpu"] = "x86-64-i7-4578U"
					}
				}
			} else if b.os("ChromeOS") {
				version, ok := map[string]string{
					"IntelUHDGraphics605": "15236.2.0",
					"RadeonVega3":         "14233.0.0",
					"Adreno618":           "14150.39.0",
					"MaliT860":            "14092.77.0",
				}[b.parts["cpu_or_gpu_value"]]
				if !ok {
					log.Fatalf("Entry %q not found in ChromeOS GPU mapping.", b.parts["cpu_or_gpu_value"])
				}
				d["gpu"] = b.parts["cpu_or_gpu_value"]
				d["release_version"] = version
			} else {
				log.Fatalf("Unknown GPU mapping for OS %q.", b.parts["os"])
			}
		}
	} else {
		if d["os"] == DEBIAN_11_OS {
			// The Debian11 compile machines in the skolo have
			// GPUs, but we still use them for compiles also.

			// Dodge Raspberry Pis.
			d["cpu"] = "x86-64"
			// Target the RTX3060 Intel machines, as they are beefy and we have
			// 20 of them, and they are set up to compile.
			d["gpu"] = "10de:2489"
		} else {
			d["gpu"] = "none"
		}
		if d["os"] == DEFAULT_OS_LINUX_GCE {
			if b.extraConfig("CanvasKit", "CMake", "Docker", "PathKit") || b.role("BuildStats", "CodeSize") {
				b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
				return
			}
			// Use many-core machines for Build tasks.
			b.linuxGceDimensions(MACHINE_TYPE_LARGE)
			return
		} else if d["os"] == DEFAULT_OS_WIN {
			// Windows CPU bots.
			d["cpu"] = "x86-64-Haswell_GCE"
			// Use many-core machines for Build tasks.
			d["machine_type"] = MACHINE_TYPE_LARGE
		} else if d["os"] == DEFAULT_OS_MAC {
			// Mac CPU bots are no longer VMs.
			d["cpu"] = "x86-64"
			d["cores"] = "12"
			delete(d, "gpu")
		}
	}

	dims := make([]string, 0, len(d))
	for k, v := range d {
		dims = append(dims, fmt.Sprintf("%s:%s", k, v))
	}
	sort.Strings(dims)
	b.dimension(dims...)
}

// bundleRecipes generates the task to bundle and isolate the recipes. Returns
// the name of the task, which may be added as a dependency.
func (b *jobBuilder) bundleRecipes() string {
	b.addTask(BUNDLE_RECIPES_NAME, func(b *taskBuilder) {
		b.cipd(specs.CIPD_PKGS_GIT_LINUX_AMD64...)
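		// The script below (run on a small GCE machine) writes the recipe bundle
		// into ${ISOLATED_OUTDIR}; kitchenTask() adds this task as a dependency so
		// recipes can run without a full checkout.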
		b.cmd("/bin/bash", "skia/infra/bots/bundle_recipes.sh", specs.PLACEHOLDER_ISOLATED_OUTDIR)
		b.linuxGceDimensions(MACHINE_TYPE_SMALL)
		b.idempotent()
		b.cas(CAS_RECIPES)
		b.usesPython()
		b.addToPATH("cipd_bin_packages", "cipd_bin_packages/bin")
	})
	return BUNDLE_RECIPES_NAME
}

// buildTaskDrivers generates the task to compile the task driver code to run on
// all platforms. Returns the name of the task, which may be added as a
// dependency.
func (b *jobBuilder) buildTaskDrivers(goos, goarch string) string {
	name := BUILD_TASK_DRIVERS_PREFIX + "_" + goos + "_" + goarch
	b.addTask(name, func(b *taskBuilder) {
		b.cmd("/bin/bash", "skia/infra/bots/build_task_drivers.sh",
			specs.PLACEHOLDER_ISOLATED_OUTDIR,
			goos+"_"+goarch)
		b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
		b.cipd(b.MustGetCipdPackageFromAsset("bazelisk"))
		b.addToPATH("bazelisk")
		b.idempotent()
		b.cas(CAS_TASK_DRIVERS)
	})
	return name
}

// createDockerImage creates the specified docker image. Returns the name of the
// generated task.
func (b *jobBuilder) createDockerImage(wasm bool) string {
	// First, derive the name of the task.
	imageName := "skia-release"
	taskName := "Housekeeper-PerCommit-CreateDockerImage_Skia_Release"
	if wasm {
		imageName = "skia-wasm-release"
		taskName = "Housekeeper-PerCommit-CreateDockerImage_Skia_WASM_Release"
	}
	imageDir := path.Join("docker", imageName)

	// Add the task.
	b.addTask(taskName, func(b *taskBuilder) {
		// TODO(borenet): Make this task not use Git.
		b.usesGit()
		b.cmd(
			"./build_push_docker_image",
			"--image_name", fmt.Sprintf("gcr.io/skia-public/%s", imageName),
			"--dockerfile_dir", imageDir,
			"--project_id", "skia-swarming-bots",
			"--task_id", specs.PLACEHOLDER_TASK_ID,
			"--task_name", b.Name,
			"--workdir", ".",
			"--gerrit_project", "skia",
			"--gerrit_url", "https://skia-review.googlesource.com",
			"--repo", specs.PLACEHOLDER_REPO,
			"--revision", specs.PLACEHOLDER_REVISION,
			"--patch_issue", specs.PLACEHOLDER_ISSUE,
			"--patch_set", specs.PLACEHOLDER_PATCHSET,
			"--patch_server", specs.PLACEHOLDER_CODEREVIEW_SERVER,
			"--swarm_out_dir", specs.PLACEHOLDER_ISOLATED_OUTDIR,
		)
		b.dep(b.buildTaskDrivers("linux", "amd64"))
		b.addToPATH("cipd_bin_packages", "cipd_bin_packages/bin")
		b.cas(CAS_EMPTY)
		b.serviceAccount(b.cfg.ServiceAccountCompile)
		b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
		b.usesDocker()
		b.cache(CACHES_DOCKER...)
	})
	return taskName
}

// createPushAppsFromSkiaDockerImage creates and pushes docker images of some apps
// (e.g. fiddler, api) using the skia-release docker image.
func (b *jobBuilder) createPushAppsFromSkiaDockerImage() {
	b.addTask(b.Name, func(b *taskBuilder) {
		// TODO(borenet): Make this task not use Git.
		b.usesGit()
		b.cmd(
			"./push_apps_from_skia_image",
			"--project_id", "skia-swarming-bots",
			"--task_id", specs.PLACEHOLDER_TASK_ID,
			"--task_name", b.Name,
			"--workdir", ".",
			"--repo", specs.PLACEHOLDER_REPO,
			"--revision", specs.PLACEHOLDER_REVISION,
			"--patch_issue", specs.PLACEHOLDER_ISSUE,
			"--patch_set", specs.PLACEHOLDER_PATCHSET,
			"--patch_server", specs.PLACEHOLDER_CODEREVIEW_SERVER,
		)
		b.dep(b.buildTaskDrivers("linux", "amd64"))
		b.dep(b.createDockerImage(false))
		b.addToPATH("cipd_bin_packages", "cipd_bin_packages/bin", "bazelisk")
		b.cas(CAS_EMPTY)
		b.cipd(b.MustGetCipdPackageFromAsset("bazelisk"))
		b.serviceAccount(b.cfg.ServiceAccountCompile)
		b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
		b.usesDocker()
		b.cache(CACHES_DOCKER...)
		b.timeout(2 * time.Hour)
	})
}

// createPushBazelAppsFromWASMDockerImage pushes those infra apps that have been ported to Bazel
// and require assets built in the WASM docker image.
// TODO(kjlubick) The inputs to this job should not be the docker build, but a Bazel build.
func (b *jobBuilder) createPushBazelAppsFromWASMDockerImage() {
	b.addTask(b.Name, func(b *taskBuilder) {
		// TODO(borenet): Make this task not use Git.
		b.usesGit()
		b.cmd(
			"./push_bazel_apps_from_wasm_image",
			"--project_id", "skia-swarming-bots",
			"--task_id", specs.PLACEHOLDER_TASK_ID,
			"--task_name", b.Name,
			"--workdir", ".",
			"--skia_revision", specs.PLACEHOLDER_REVISION,
		)
		b.dep(b.buildTaskDrivers("linux", "amd64"))
		b.dep(b.createDockerImage(true))
		b.addToPATH("cipd_bin_packages", "cipd_bin_packages/bin", "bazelisk")
		b.cas(CAS_EMPTY)
		b.cipd(b.MustGetCipdPackageFromAsset("bazelisk"))
		b.serviceAccount(b.cfg.ServiceAccountCompile)
		b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
		b.usesDocker()
		b.cache(CACHES_DOCKER...)
	})
}

var iosRegex = regexp.MustCompile(`os:iOS-(.*)`)

func (b *taskBuilder) maybeAddIosDevImage() {
	for _, dim := range b.Spec.Dimensions {
		if m := iosRegex.FindStringSubmatch(dim); len(m) >= 2 {
			var asset string
			switch m[1] {
			// Other patch versions can be added to the same case.
			case "11.4.1":
				asset = "ios-dev-image-11.4"
			case "13.3.1":
				asset = "ios-dev-image-13.3"
			case "13.4.1":
				asset = "ios-dev-image-13.4"
			case "13.5.1":
				asset = "ios-dev-image-13.5"
			case "13.6":
				asset = "ios-dev-image-13.6"
			default:
				log.Fatalf("Unable to determine correct ios-dev-image asset for %s. If %s is a new iOS release, you must add a CIPD package containing the corresponding iOS dev image; see ios-dev-image-11.4 for an example.", b.Name, m[1])
			}
			b.asset(asset)
			break
		} else if strings.Contains(dim, "iOS") {
			log.Fatalf("Must specify iOS version for %s to obtain correct dev image; os dimension is missing version: %s", b.Name, dim)
		}
	}
}

// compile generates a compile task. Returns the name of the compile task.
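// A minimal sketch of how the other generators in this file consume compile()
// (see dm(), buildstats(), codesize(), and puppeteer()):
//
//	compileTaskName := b.compile()
//	b.addTask(b.Name, func(b *taskBuilder) {
//		b.dep(compileTaskName)
//		// ... task-specific setup ...
//	})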
func (b *jobBuilder) compile() string {
	name := b.deriveCompileTaskName()
	if b.extraConfig("WasmGMTests") {
		b.compileWasmGMTests(name)
	} else {
		b.addTask(name, func(b *taskBuilder) {
			recipe := "compile"
			casSpec := CAS_COMPILE
			if b.extraConfig("NoDEPS", "CMake", "Flutter", "NoPatch") {
				recipe = "sync_and_compile"
				casSpec = CAS_RUN_RECIPE
				b.recipeProps(EXTRA_PROPS)
				b.usesGit()
				if !b.extraConfig("NoDEPS") {
					b.cache(CACHES_WORKDIR...)
				}
			} else {
				b.idempotent()
			}
			if b.extraConfig("NoPatch") {
				b.kitchenTask(recipe, OUTPUT_BUILD_NOPATCH)
			} else {
				b.kitchenTask(recipe, OUTPUT_BUILD)
			}
			b.cas(casSpec)
			b.serviceAccount(b.cfg.ServiceAccountCompile)
			b.swarmDimensions()
			if b.extraConfig("Docker", "LottieWeb", "CMake") || b.compiler("EMCC") {
				b.usesDocker()
				b.cache(CACHES_DOCKER...)
			}
			if b.extraConfig("Dawn") {
				// https://dawn.googlesource.com/dawn/+/516701da8184655a47c92a573cc84da7db5e69d4/generator/dawn_version_generator.py#21
				b.usesGit()
			}

			// Android bots require a toolchain.
			if b.extraConfig("Android") {
				if b.matchOs("Mac") {
					b.asset("android_ndk_darwin")
				} else if b.matchOs("Win") {
					pkg := b.MustGetCipdPackageFromAsset("android_ndk_windows")
					pkg.Path = "n"
					b.cipd(pkg)
				} else {
					b.asset("android_ndk_linux")
				}
			} else if b.extraConfig("Chromebook") {
				b.asset("clang_linux")
				if b.arch("x86_64") {
					b.asset("chromebook_x86_64_gles")
				} else if b.arch("arm") {
					b.asset("armhf_sysroot")
					b.asset("chromebook_arm_gles")
				}
			} else if b.isLinux() {
				if b.compiler("Clang") {
					b.asset("clang_linux")
				}
				if b.extraConfig("SwiftShader") {
					b.asset("cmake_linux")
				}
				b.asset("ccache_linux")
				b.usesCCache()
			} else if b.matchOs("Win") {
				b.asset("win_toolchain")
				if b.compiler("Clang") {
					b.asset("clang_win")
				}
			} else if b.matchOs("Mac") {
				b.cipd(CIPD_PKGS_XCODE...)
				b.Spec.Caches = append(b.Spec.Caches, &specs.Cache{
					Name: "xcode",
					Path: "cache/Xcode.app",
				})
				b.asset("ccache_mac")
				b.usesCCache()
				if b.extraConfig("iOS") {
					b.asset("provisioning_profile_ios")
				}
			}
		})
	}

	// All compile tasks are runnable as their own Job. Assert that the Job
	// is listed in jobs.
	if !In(name, b.jobs) {
		log.Fatalf("Job %q is missing from the jobs list! Derived from: %q", name, b.Name)
	}

	return name
}

// recreateSKPs generates a RecreateSKPs task.
func (b *jobBuilder) recreateSKPs() {
	cmd := []string{
		"./recreate_skps",
		"--local=false",
		"--project_id", "skia-swarming-bots",
		"--task_id", specs.PLACEHOLDER_TASK_ID,
		"--task_name", b.Name,
		"--skia_revision", specs.PLACEHOLDER_REVISION,
		"--patch_ref", specs.PLACEHOLDER_PATCH_REF,
		"--git_cache", "cache/git",
		"--checkout_root", "cache/work",
		"--dm_path", "build/dm",
	}
	if b.matchExtraConfig("DryRun") {
		cmd = append(cmd, "--dry_run")
	}
	b.addTask(b.Name, func(b *taskBuilder) {
		b.cas(CAS_RECREATE_SKPS)
		b.dep(b.buildTaskDrivers("linux", "amd64"))
		b.dep("Build-Debian10-Clang-x86_64-Release") // To get DM.
		b.cmd(cmd...)
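		// luci-auth (installed via CIPD below) is assumed to be what the task
		// driver uses to authenticate as the RecreateSKPs service account.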
		b.cipd(CIPD_PKG_LUCI_AUTH)
		b.serviceAccount(b.cfg.ServiceAccountRecreateSKPs)
		b.dimension(
			"pool:SkiaCT",
			fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
		)
		b.usesGo()
		b.cache(CACHES_WORKDIR...)
		b.timeout(6 * time.Hour)
		b.usesPython()
		b.addToPATH("cipd_bin_packages", "cipd_bin_packages/bin")
		b.attempts(2)
	})
}

// checkGeneratedFiles verifies that no generated SKSL files have been edited
// by hand.
func (b *jobBuilder) checkGeneratedFiles() {
	b.addTask(b.Name, func(b *taskBuilder) {
		b.cas(CAS_BAZEL)
		b.dep(b.buildTaskDrivers("linux", "amd64"))
		b.cmd("./check_generated_files",
			"--local=false",
			"--git_path=cipd_bin_packages/git",
			"--project_id", "skia-swarming-bots",
			"--task_id", specs.PLACEHOLDER_TASK_ID,
			"--task_name", b.Name,
			"--bazel_arg=--config=for_linux_x64_with_rbe",
			"--bazel_arg=--jobs=100",
		)
		b.cipd(specs.CIPD_PKGS_GIT_LINUX_AMD64...)
		b.cipd(b.MustGetCipdPackageFromAsset("bazelisk"))
		b.addToPATH("bazelisk")
		b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
		b.serviceAccount(b.cfg.ServiceAccountHousekeeper)
	})
}

// checkGnToBp verifies that the gn_to_bp.py script continues to work.
func (b *jobBuilder) checkGnToBp() {
	b.addTask(b.Name, func(b *taskBuilder) {
		b.cas(CAS_COMPILE)
		b.dep(b.buildTaskDrivers("linux", "amd64"))
		b.cmd("./run_gn_to_bp",
			"--local=false",
			"--project_id", "skia-swarming-bots",
			"--task_id", specs.PLACEHOLDER_TASK_ID,
			"--task_name", b.Name,
		)
		b.linuxGceDimensions(MACHINE_TYPE_SMALL)
		b.usesPython()
		b.serviceAccount(b.cfg.ServiceAccountHousekeeper)
	})
}

// housekeeper generates a Housekeeper task.
func (b *jobBuilder) housekeeper() {
	b.addTask(b.Name, func(b *taskBuilder) {
		b.recipeProps(EXTRA_PROPS)
		b.kitchenTask("housekeeper", OUTPUT_NONE)
		b.serviceAccount(b.cfg.ServiceAccountHousekeeper)
		b.linuxGceDimensions(MACHINE_TYPE_SMALL)
		b.usesGit()
		b.cache(CACHES_WORKDIR...)
	})
}

// g3FrameworkCanary generates a G3 Framework Canary task. Returns
// the name of the last task in the generated chain of tasks, which the Job
// should add as a dependency.
func (b *jobBuilder) g3FrameworkCanary() {
	b.addTask(b.Name, func(b *taskBuilder) {
		b.cas(CAS_EMPTY)
		b.dep(b.buildTaskDrivers("linux", "amd64"))
		b.cmd("./g3_canary",
			"--local=false",
			"--project_id", "skia-swarming-bots",
			"--task_id", specs.PLACEHOLDER_TASK_ID,
			"--task_name", b.Name,
			"--repo", specs.PLACEHOLDER_REPO,
			"--revision", specs.PLACEHOLDER_REVISION,
			"--patch_issue", specs.PLACEHOLDER_ISSUE,
			"--patch_set", specs.PLACEHOLDER_PATCHSET,
			"--patch_server", specs.PLACEHOLDER_CODEREVIEW_SERVER,
		)
		b.linuxGceDimensions(MACHINE_TYPE_SMALL)
		b.cipd(CIPD_PKG_LUCI_AUTH)
		b.serviceAccount("skia-g3-framework-compile@skia-swarming-bots.iam.gserviceaccount.com")
		b.timeout(3 * time.Hour)
		b.attempts(1)
	})
}

// infra generates an infra_tests task.
func (b *jobBuilder) infra() {
	b.addTask(b.Name, func(b *taskBuilder) {
		if b.matchOs("Win") || b.matchExtraConfig("Win") {
			b.dimension(
				// Specify CPU to avoid running builds on bots with a more unique CPU.
				"cpu:x86-64-Haswell_GCE",
				"gpu:none",
				fmt.Sprintf("machine_type:%s", MACHINE_TYPE_MEDIUM), // We don't have any small Windows instances.
				fmt.Sprintf("os:%s", DEFAULT_OS_WIN),
				fmt.Sprintf("pool:%s", b.cfg.Pool),
			)
		} else {
			b.linuxGceDimensions(MACHINE_TYPE_SMALL)
		}
		b.recipeProp("repository", specs.PLACEHOLDER_REPO)
		b.kitchenTask("infra", OUTPUT_NONE)
		b.cas(CAS_WHOLE_REPO)
		b.serviceAccount(b.cfg.ServiceAccountCompile)
		b.usesGSUtil()
		b.idempotent()
		b.usesGo()
	})
}

// buildstats generates a buildstats task, which compiles code and generates
// statistics about the build.
func (b *jobBuilder) buildstats() {
	compileTaskName := b.compile()
	b.addTask(b.Name, func(b *taskBuilder) {
		b.recipeProps(EXTRA_PROPS)
		b.kitchenTask("compute_buildstats", OUTPUT_PERF)
		b.dep(compileTaskName)
		b.asset("bloaty")
		b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
		b.usesDocker()
		b.usesGit()
		b.cache(CACHES_WORKDIR...)
	})
	// Upload release results (for tracking in perf).
	// We have some jobs that are FYI (e.g. Debug-CanvasKit, tree-map generator).
	if b.release() && !b.arch("x86_64") {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, b.Name)
		depName := b.Name
		b.addTask(uploadName, func(b *taskBuilder) {
			b.recipeProp("gs_bucket", b.cfg.GsBucketNano)
			b.recipeProps(EXTRA_PROPS)
			// TODO(borenet): I'm not sure why the upload task is
			// using the BuildStats task name, but I've done this
			// to maintain existing behavior.
			b.Name = depName
			b.kitchenTask("upload_buildstats_results", OUTPUT_NONE)
			b.Name = uploadName
			b.serviceAccount(b.cfg.ServiceAccountUploadNano)
			b.linuxGceDimensions(MACHINE_TYPE_SMALL)
			b.usesGSUtil()
			b.dep(depName)
		})
	}
}

// codesize generates a codesize task, which takes a binary produced by a
// compile task, runs Bloaty against it, and uploads the resulting code size
// statistics to the GCS bucket belonging to the codesize.skia.org service.
func (b *jobBuilder) codesize() {
	compileTaskName := b.compile()
	compileTaskNameNoPatch := compileTaskName
	if b.extraConfig("Android") {
		compileTaskNameNoPatch += "_NoPatch" // add a second "extra config"
	} else {
		compileTaskNameNoPatch += "-NoPatch" // add the only "extra config"
	}

	bloatyCipdPkg := b.MustGetCipdPackageFromAsset("bloaty")

	b.addTask(b.Name, func(b *taskBuilder) {
		b.cas(CAS_EMPTY)
		b.dep(b.buildTaskDrivers("linux", "amd64"), compileTaskName)
		b.dep(b.buildTaskDrivers("linux", "amd64"), compileTaskNameNoPatch)
		cmd := []string{
			"./codesize",
			"--local=false",
			"--project_id", "skia-swarming-bots",
			"--task_id", specs.PLACEHOLDER_TASK_ID,
			"--task_name", b.Name,
			"--compile_task_name", compileTaskName,
			"--compile_task_name_no_patch", compileTaskNameNoPatch,
			// Note: the binary name cannot contain dashes, otherwise the naming
			// schema logic will partition it into multiple parts.
			//
			// If we ever need to define a CodeSize-* task for a binary with
			// dashes in its name (e.g. "my-binary"), a potential workaround is to
			// create a mapping from a new, non-dashed binary name (e.g. "my_binary")
			// to the actual binary name with dashes. This mapping can be hardcoded
			// in this function; no changes to the task driver would be necessary.
			"--binary_name", b.parts["binary_name"],
			"--bloaty_cipd_version", bloatyCipdPkg.Version,
			"--bloaty_binary", "bloaty/bloaty",

			"--repo", specs.PLACEHOLDER_REPO,
			"--revision", specs.PLACEHOLDER_REVISION,
			"--patch_issue", specs.PLACEHOLDER_ISSUE,
			"--patch_set", specs.PLACEHOLDER_PATCHSET,
			"--patch_server", specs.PLACEHOLDER_CODEREVIEW_SERVER,
		}
		if strings.Contains(compileTaskName, "Android") {
			b.asset("android_ndk_linux")
			cmd = append(cmd, "--strip_binary",
				"android_ndk_linux/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin/arm-linux-androideabi-strip")
		} else {
			b.asset("binutils_linux_x64")
			cmd = append(cmd, "--strip_binary", "binutils_linux_x64/strip")
		}
		b.cmd(cmd...)
		b.linuxGceDimensions(MACHINE_TYPE_SMALL)
		b.cache(CACHES_WORKDIR...)
		b.cipd(CIPD_PKG_LUCI_AUTH)
		b.asset("bloaty")
		b.serviceAccount("skia-external-codesize@skia-swarming-bots.iam.gserviceaccount.com")
		b.timeout(20 * time.Minute)
		b.attempts(1)
	})
}

// doUpload indicates whether the given Job should upload its results.
func (b *jobBuilder) doUpload() bool {
	for _, s := range b.cfg.NoUpload {
		m, err := regexp.MatchString(s, b.Name)
		if err != nil {
			log.Fatal(err)
		}
		if m {
			return false
		}
	}
	return true
}

// commonTestPerfAssets adds the assets needed by Test and Perf tasks.
func (b *taskBuilder) commonTestPerfAssets() {
	// Docker-based tests don't need the standard CIPD assets
	if b.extraConfig("CanvasKit", "PathKit") || (b.role("Test") && b.extraConfig("LottieWeb")) {
		return
	}
	if b.extraConfig("Skpbench") {
		// Skpbench only needs skps
		b.asset("skp", "mskp")
	} else if b.os("Android", "ChromeOS", "iOS") {
		b.asset("skp", "svg", "skimage")
	} else if b.extraConfig("OldestSupportedSkpVersion") {
		b.assetWithVersion("skp", oldestSupportedSkpVersion)
	} else {
		// for desktop machines
		b.asset("skimage", "skp", "svg")
	}

	if b.isLinux() && b.matchExtraConfig("SAN") {
		b.asset("clang_linux")
	}

	if b.isLinux() {
		if b.extraConfig("Vulkan") {
			b.asset("linux_vulkan_sdk")
		}
		if b.matchGpu("Intel") {
			if b.matchGpu("IrisXe") {
				b.asset("mesa_intel_driver_linux_22")
			} else {
				// Use this for legacy drivers that were culled in v22 of Mesa.
				// https://www.phoronix.com/scan.php?page=news_item&px=Mesa-22.0-Drops-OpenSWR
				b.asset("mesa_intel_driver_linux")
			}
		}
	}
}

// directUpload adds prerequisites for uploading to GCS.
func (b *taskBuilder) directUpload(gsBucket, serviceAccount string) {
	b.recipeProp("gs_bucket", gsBucket)
	b.serviceAccount(serviceAccount)
	b.usesGSUtil()
}

// dm generates a Test task using dm.
func (b *jobBuilder) dm() {
	compileTaskName := ""
	// LottieWeb doesn't require anything in Skia to be compiled.
	if !b.extraConfig("LottieWeb") {
		compileTaskName = b.compile()
	}
	directUpload := false
	b.addTask(b.Name, func(b *taskBuilder) {
		cas := CAS_TEST
		recipe := "test"
		if b.extraConfig("PathKit") {
			cas = CAS_PATHKIT
			recipe = "test_pathkit"
			if b.doUpload() {
				b.directUpload(b.cfg.GsBucketGm, b.cfg.ServiceAccountUploadGM)
				directUpload = true
			}
		} else if b.extraConfig("CanvasKit") {
			cas = CAS_CANVASKIT
			recipe = "test_canvaskit"
			if b.doUpload() {
				b.directUpload(b.cfg.GsBucketGm, b.cfg.ServiceAccountUploadGM)
				directUpload = true
			}
		} else if b.extraConfig("LottieWeb") {
			// CAS_LOTTIE_CI differs from CAS_LOTTIE_WEB in that it includes
			// more of the files, especially those brought in via DEPS in the
			// lottie-ci repo. The main difference between Perf.+LottieWeb and
			// Test.+LottieWeb is that the former pulls in the lottie build via
			// npm and the latter always tests at lottie's
			// ToT.
			cas = CAS_LOTTIE_CI
			recipe = "test_lottie_web"
			if b.doUpload() {
				b.directUpload(b.cfg.GsBucketGm, b.cfg.ServiceAccountUploadGM)
				directUpload = true
			}
		} else {
			// Default recipe supports direct upload.
			// TODO(http://skbug.com/11785): Windows jobs are unable to extract gsutil.
			// https://bugs.chromium.org/p/chromium/issues/detail?id=1192611
			if b.doUpload() && !b.matchOs("Win") {
				b.directUpload(b.cfg.GsBucketGm, b.cfg.ServiceAccountUploadGM)
				directUpload = true
			}
		}
		b.recipeProp("gold_hashes_url", b.cfg.GoldHashesURL)
		b.recipeProps(EXTRA_PROPS)
		iid := b.internalHardwareLabel()
		iidStr := ""
		if iid != nil {
			iidStr = strconv.Itoa(*iid)
		}
		if recipe == "test" {
			b.dmFlags(iidStr)
		}
		b.kitchenTask(recipe, OUTPUT_TEST)
		b.cas(cas)
		b.swarmDimensions()
		if b.extraConfig("CanvasKit", "Docker", "LottieWeb", "PathKit") {
			b.usesDocker()
		}
		if compileTaskName != "" {
			b.dep(compileTaskName)
		}
		if b.matchOs("Android") && b.extraConfig("ASAN") {
			b.asset("android_ndk_linux")
		}
		b.commonTestPerfAssets()
		if b.matchExtraConfig("Lottie") {
			b.asset("lottie-samples")
		}
		b.expiration(20 * time.Hour)

		b.timeout(4 * time.Hour)
		if b.extraConfig("Valgrind") {
			b.timeout(9 * time.Hour)
			b.expiration(48 * time.Hour)
			b.asset("valgrind")
			// Since Valgrind runs on the same bots as the CQ, we restrict Valgrind to a subset of the bots
			// to ensure there are always bots free for CQ tasks.
			b.dimension("valgrind:1")
		} else if b.extraConfig("MSAN") {
			b.timeout(9 * time.Hour)
		} else if b.arch("x86") && b.debug() {
			// skia:6737
			b.timeout(6 * time.Hour)
		}
		b.maybeAddIosDevImage()
	})

	// Upload results if necessary. TODO(kjlubick): If we do coverage analysis at the same
	// time as normal tests (which would be nice), cfg.json needs to have Coverage removed.
1738 if b.doUpload() && !directUpload { 1739 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, b.Name) 1740 depName := b.Name 1741 b.addTask(uploadName, func(b *taskBuilder) { 1742 b.recipeProp("gs_bucket", b.cfg.GsBucketGm) 1743 b.recipeProps(EXTRA_PROPS) 1744 b.kitchenTask("upload_dm_results", OUTPUT_NONE) 1745 b.serviceAccount(b.cfg.ServiceAccountUploadGM) 1746 b.linuxGceDimensions(MACHINE_TYPE_SMALL) 1747 b.usesGSUtil() 1748 b.dep(depName) 1749 }) 1750 } 1751} 1752 1753func (b *jobBuilder) fm() { 1754 goos := "linux" 1755 if strings.Contains(b.parts["os"], "Win") { 1756 goos = "windows" 1757 } 1758 if strings.Contains(b.parts["os"], "Mac") { 1759 goos = "darwin" 1760 } 1761 1762 b.addTask(b.Name, func(b *taskBuilder) { 1763 b.asset("skimage", "skp", "svg") 1764 b.cas(CAS_TEST) 1765 b.dep(b.buildTaskDrivers(goos, "amd64"), b.compile()) 1766 b.cmd("./fm_driver${EXECUTABLE_SUFFIX}", 1767 "--local=false", 1768 "--resources=skia/resources", 1769 "--imgs=skimage", 1770 "--skps=skp", 1771 "--svgs=svg", 1772 "--project_id", "skia-swarming-bots", 1773 "--task_id", specs.PLACEHOLDER_TASK_ID, 1774 "--bot", b.Name, 1775 "--gold="+strconv.FormatBool(!b.matchExtraConfig("SAN")), 1776 "--gold_hashes_url", b.cfg.GoldHashesURL, 1777 "build/fm${EXECUTABLE_SUFFIX}") 1778 b.serviceAccount(b.cfg.ServiceAccountUploadGM) 1779 b.swarmDimensions() 1780 b.attempts(1) 1781 1782 if b.isLinux() && b.matchExtraConfig("SAN") { 1783 b.asset("clang_linux") 1784 // Sanitizers may want to run llvm-symbolizer for readable stack traces. 1785 b.addToPATH("clang_linux/bin") 1786 1787 // Point sanitizer builds at our prebuilt libc++ for this sanitizer. 1788 if b.extraConfig("MSAN") { 1789 // We'd see false positives in std::basic_string<char> if this weren't set. 1790 b.envPrefixes("LD_LIBRARY_PATH", "clang_linux/msan") 1791 } else if b.extraConfig("TSAN") { 1792 // Occasional false positives may crop up in the standard library without this. 1793 b.envPrefixes("LD_LIBRARY_PATH", "clang_linux/tsan") 1794 } else { 1795 // The machines we run on may not have libstdc++ installed. 1796 b.envPrefixes("LD_LIBRARY_PATH", "clang_linux/lib/x86_64-unknown-linux-gnu") 1797 } 1798 } 1799 }) 1800} 1801 1802// canary generates a task that uses TaskDrivers to trigger canary manual rolls on autorollers. 1803// Canary-G3 does not use this path because it is very different from other autorollers. 1804func (b *jobBuilder) canary(rollerName, canaryCQKeyword, targetProjectBaseURL string) { 1805 b.addTask(b.Name, func(b *taskBuilder) { 1806 b.cas(CAS_EMPTY) 1807 b.dep(b.buildTaskDrivers("linux", "amd64")) 1808 b.cmd("./canary", 1809 "--local=false", 1810 "--project_id", "skia-swarming-bots", 1811 "--task_id", specs.PLACEHOLDER_TASK_ID, 1812 "--task_name", b.Name, 1813 "--roller_name", rollerName, 1814 "--cq_keyword", canaryCQKeyword, 1815 "--target_project_base_url", targetProjectBaseURL, 1816 "--repo", specs.PLACEHOLDER_REPO, 1817 "--revision", specs.PLACEHOLDER_REVISION, 1818 "--patch_issue", specs.PLACEHOLDER_ISSUE, 1819 "--patch_set", specs.PLACEHOLDER_PATCHSET, 1820 "--patch_server", specs.PLACEHOLDER_CODEREVIEW_SERVER, 1821 ) 1822 b.linuxGceDimensions(MACHINE_TYPE_SMALL) 1823 b.cipd(CIPD_PKG_LUCI_AUTH) 1824 b.serviceAccount(b.cfg.ServiceAccountCanary) 1825 b.timeout(3 * time.Hour) 1826 b.attempts(1) 1827 }) 1828} 1829 1830// puppeteer generates a task that uses TaskDrivers combined with a node script and puppeteer to 1831// benchmark something using Chromium (e.g. CanvasKit, LottieWeb). 
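// The extra_config part of the job name chooses the benchmark driver: "SkottieFrames" runs
// perf_puppeteer_skottie_frames against lotties_with_assets, "RenderSKP" runs
// perf_puppeteer_render_skps against the skp asset, and "CanvasPerf" runs perf_puppeteer_canvas
// (the canvas_perf.js suite). "WebGL1" switches the reported WebGL version from 2 to 1.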
1832func (b *jobBuilder) puppeteer() { 1833 compileTaskName := b.compile() 1834 b.addTask(b.Name, func(b *taskBuilder) { 1835 b.defaultSwarmDimensions() 1836 b.usesNode() 1837 b.cipd(CIPD_PKG_LUCI_AUTH) 1838 b.dep(b.buildTaskDrivers("linux", "amd64"), compileTaskName) 1839 b.output(OUTPUT_PERF) 1840 b.timeout(60 * time.Minute) 1841 b.cas(CAS_PUPPETEER) 1842 b.serviceAccount(b.cfg.ServiceAccountCompile) 1843 1844 webglversion := "2" 1845 if b.extraConfig("WebGL1") { 1846 webglversion = "1" 1847 } 1848 1849 if b.extraConfig("SkottieFrames") { 1850 b.cmd( 1851 "./perf_puppeteer_skottie_frames", 1852 "--project_id", "skia-swarming-bots", 1853 "--git_hash", specs.PLACEHOLDER_REVISION, 1854 "--task_id", specs.PLACEHOLDER_TASK_ID, 1855 "--task_name", b.Name, 1856 "--canvaskit_bin_path", "./build", 1857 "--lotties_path", "./lotties_with_assets", 1858 "--node_bin_path", "./node/node/bin", 1859 "--benchmark_path", "./tools/perf-canvaskit-puppeteer", 1860 "--output_path", OUTPUT_PERF, 1861 "--os_trace", b.parts["os"], 1862 "--model_trace", b.parts["model"], 1863 "--cpu_or_gpu_trace", b.parts["cpu_or_gpu"], 1864 "--cpu_or_gpu_value_trace", b.parts["cpu_or_gpu_value"], 1865 "--webgl_version", webglversion, // ignore when running with cpu backend 1866 ) 1867 b.needsLottiesWithAssets() 1868 } else if b.extraConfig("RenderSKP") { 1869 b.cmd( 1870 "./perf_puppeteer_render_skps", 1871 "--project_id", "skia-swarming-bots", 1872 "--git_hash", specs.PLACEHOLDER_REVISION, 1873 "--task_id", specs.PLACEHOLDER_TASK_ID, 1874 "--task_name", b.Name, 1875 "--canvaskit_bin_path", "./build", 1876 "--skps_path", "./skp", 1877 "--node_bin_path", "./node/node/bin", 1878 "--benchmark_path", "./tools/perf-canvaskit-puppeteer", 1879 "--output_path", OUTPUT_PERF, 1880 "--os_trace", b.parts["os"], 1881 "--model_trace", b.parts["model"], 1882 "--cpu_or_gpu_trace", b.parts["cpu_or_gpu"], 1883 "--cpu_or_gpu_value_trace", b.parts["cpu_or_gpu_value"], 1884 "--webgl_version", webglversion, 1885 ) 1886 b.asset("skp") 1887 } else if b.extraConfig("CanvasPerf") { // refers to the canvas_perf.js test suite 1888 b.cmd( 1889 "./perf_puppeteer_canvas", 1890 "--project_id", "skia-swarming-bots", 1891 "--git_hash", specs.PLACEHOLDER_REVISION, 1892 "--task_id", specs.PLACEHOLDER_TASK_ID, 1893 "--task_name", b.Name, 1894 "--canvaskit_bin_path", "./build", 1895 "--node_bin_path", "./node/node/bin", 1896 "--benchmark_path", "./tools/perf-canvaskit-puppeteer", 1897 "--output_path", OUTPUT_PERF, 1898 "--os_trace", b.parts["os"], 1899 "--model_trace", b.parts["model"], 1900 "--cpu_or_gpu_trace", b.parts["cpu_or_gpu"], 1901 "--cpu_or_gpu_value_trace", b.parts["cpu_or_gpu_value"], 1902 "--webgl_version", webglversion, 1903 ) 1904 b.asset("skp") 1905 } 1906 1907 }) 1908 1909 // Upload results to Perf after. 1910 // TODO(kjlubick,borenet) deduplicate this with the logic in perf(). 1911 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, b.Name) 1912 depName := b.Name 1913 b.addTask(uploadName, func(b *taskBuilder) { 1914 b.recipeProp("gs_bucket", b.cfg.GsBucketNano) 1915 b.recipeProps(EXTRA_PROPS) 1916 // TODO(borenet): I'm not sure why the upload task is 1917 // using the Perf task name, but I've done this to 1918 // maintain existing behavior. 
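		// b.Name is temporarily swapped to the Perf task's name while kitchenTask builds the
		// upload command, then restored so the Job itself keeps its Upload- prefixed name.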
1919 b.Name = depName 1920 b.kitchenTask("upload_nano_results", OUTPUT_NONE) 1921 b.Name = uploadName 1922 b.serviceAccount(b.cfg.ServiceAccountUploadNano) 1923 b.linuxGceDimensions(MACHINE_TYPE_SMALL) 1924 b.usesGSUtil() 1925 b.dep(depName) 1926 }) 1927} 1928 1929// perf generates a Perf task. 1930func (b *jobBuilder) perf() { 1931 compileTaskName := "" 1932 // LottieWeb doesn't require anything in Skia to be compiled. 1933 if !b.extraConfig("LottieWeb") { 1934 compileTaskName = b.compile() 1935 } 1936 doUpload := !b.debug() && b.doUpload() 1937 b.addTask(b.Name, func(b *taskBuilder) { 1938 recipe := "perf" 1939 cas := CAS_PERF 1940 if b.extraConfig("Skpbench") { 1941 recipe = "skpbench" 1942 cas = CAS_SKPBENCH 1943 } else if b.extraConfig("PathKit") { 1944 cas = CAS_PATHKIT 1945 recipe = "perf_pathkit" 1946 } else if b.extraConfig("CanvasKit") { 1947 cas = CAS_CANVASKIT 1948 recipe = "perf_canvaskit" 1949 } else if b.extraConfig("SkottieTracing") { 1950 recipe = "perf_skottietrace" 1951 } else if b.extraConfig("SkottieWASM") { 1952 recipe = "perf_skottiewasm_lottieweb" 1953 cas = CAS_SKOTTIE_WASM 1954 } else if b.extraConfig("LottieWeb") { 1955 recipe = "perf_skottiewasm_lottieweb" 1956 cas = CAS_LOTTIE_WEB 1957 } 1958 b.recipeProps(EXTRA_PROPS) 1959 if recipe == "perf" { 1960 b.nanobenchFlags(doUpload) 1961 } else if recipe == "skpbench" { 1962 b.skpbenchFlags() 1963 } 1964 b.kitchenTask(recipe, OUTPUT_PERF) 1965 b.cas(cas) 1966 b.swarmDimensions() 1967 if b.extraConfig("Docker") { 1968 b.usesDocker() 1969 } 1970 if compileTaskName != "" { 1971 b.dep(compileTaskName) 1972 } 1973 b.commonTestPerfAssets() 1974 b.expiration(20 * time.Hour) 1975 b.timeout(4 * time.Hour) 1976 1977 if b.extraConfig("Valgrind") { 1978 b.timeout(9 * time.Hour) 1979 b.expiration(48 * time.Hour) 1980 b.asset("valgrind") 1981 // Since Valgrind runs on the same bots as the CQ, we restrict Valgrind to a subset of the bots 1982 // to ensure there are always bots free for CQ tasks. 1983 b.dimension("valgrind:1") 1984 } else if b.extraConfig("MSAN") { 1985 b.timeout(9 * time.Hour) 1986 } else if b.parts["arch"] == "x86" && b.parts["configuration"] == "Debug" { 1987 // skia:6737 1988 b.timeout(6 * time.Hour) 1989 } else if b.extraConfig("LottieWeb", "SkottieWASM") { 1990 b.asset("node", "lottie-samples") 1991 } else if b.matchExtraConfig("SkottieTracing") { 1992 b.needsLottiesWithAssets() 1993 } else if b.matchExtraConfig("Skottie") { 1994 b.asset("lottie-samples") 1995 } 1996 1997 if b.matchOs("Android") && b.cpu() { 1998 b.asset("text_blob_traces") 1999 } 2000 b.maybeAddIosDevImage() 2001 2002 iid := b.internalHardwareLabel() 2003 if iid != nil { 2004 b.Spec.Command = append(b.Spec.Command, fmt.Sprintf("internal_hardware_label=%d", *iid)) 2005 } 2006 }) 2007 2008 // Upload results if necessary. 2009 if doUpload { 2010 uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, b.Name) 2011 depName := b.Name 2012 b.addTask(uploadName, func(b *taskBuilder) { 2013 b.recipeProp("gs_bucket", b.cfg.GsBucketNano) 2014 b.recipeProps(EXTRA_PROPS) 2015 // TODO(borenet): I'm not sure why the upload task is 2016 // using the Perf task name, but I've done this to 2017 // maintain existing behavior. 
2018 			b.Name = depName
2019 			b.kitchenTask("upload_nano_results", OUTPUT_NONE)
2020 			b.Name = uploadName
2021 			b.serviceAccount(b.cfg.ServiceAccountUploadNano)
2022 			b.linuxGceDimensions(MACHINE_TYPE_SMALL)
2023 			b.usesGSUtil()
2024 			b.dep(depName)
2025 		})
2026 	}
2027 }
2028 
2029 // presubmit generates a task which runs the presubmit for this repo.
2030 func (b *jobBuilder) presubmit() {
2031 	b.addTask(b.Name, func(b *taskBuilder) {
2032 		b.recipeProps(map[string]string{
2033 			"category":         "cq",
2034 			"patch_gerrit_url": "https://skia-review.googlesource.com",
2035 			"patch_project":    "skia",
2036 			"patch_ref":        specs.PLACEHOLDER_PATCH_REF,
2037 			"reason":           "CQ",
2038 			"repo_name":        "skia",
2039 		})
2040 		b.recipeProps(EXTRA_PROPS)
2041 		b.kitchenTaskNoBundle("run_presubmit", OUTPUT_NONE)
2042 		b.cas(CAS_RUN_RECIPE)
2043 		b.serviceAccount(b.cfg.ServiceAccountCompile)
2044 		// Use MACHINE_TYPE_LARGE because it seems to save time versus
2045 		// MEDIUM and we want presubmit to be fast.
2046 		b.linuxGceDimensions(MACHINE_TYPE_LARGE)
2047 		b.usesGit()
2048 		b.cipd(&specs.CipdPackage{
2049 			Name:    "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
2050 			Path:    "recipe_bundle",
2051 			Version: "git_revision:1a28cb094add070f4beefd052725223930d8c27a",
2052 		})
2053 	})
2054 }
2055 
2056 // compileWasmGMTests uses a task driver to compile the GMs and unit tests for Web Assembly (WASM).
2057 // We can use the same build for both CPU and GPU tests since the latter requires the code for the
2058 // former anyway.
2059 func (b *jobBuilder) compileWasmGMTests(compileName string) {
2060 	b.addTask(compileName, func(b *taskBuilder) {
2061 		b.attempts(1)
2062 		b.usesDocker()
2063 		b.linuxGceDimensions(MACHINE_TYPE_MEDIUM)
2064 		b.cipd(CIPD_PKG_LUCI_AUTH)
2065 		b.dep(b.buildTaskDrivers("linux", "amd64"))
2066 		b.output("wasm_out")
2067 		b.timeout(60 * time.Minute)
2068 		b.cas(CAS_COMPILE)
2069 		b.serviceAccount(b.cfg.ServiceAccountCompile)
2070 		b.cache(CACHES_DOCKER...)
2071 		// For now, we only have one compile mode - a GPU release mode. This should be sufficient to
2072 		// run CPU, WebGL1, and WebGL2 tests. Debug mode is not needed for the waterfall because
2073 		// when using puppeteer, stacktraces from exceptions are hard to get access to, so we do not
2074 		// even bother.
2075 		b.cmd(
2076 			"./compile_wasm_gm_tests",
2077 			"--project_id", "skia-swarming-bots",
2078 			"--task_id", specs.PLACEHOLDER_TASK_ID,
2079 			"--task_name", compileName,
2080 			"--out_path", "./wasm_out",
2081 			"--skia_path", "./skia",
2082 			"--work_path", "./cache/docker/wasm_gm",
2083 		)
2084 	})
2085 }
2086 
2087 // runWasmGMTests uses a task driver to run the compiled WASM GMs and unit tests in a browser
2088 // and upload the results to Gold. It depends on the corresponding compile task for the
2089 // wasm_out build output.
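// The Gold keys passed below are hard-coded for the single configuration we currently run
// (Release, WebGL2, Chrome on a QuadroP400 "Golo" machine under Ubuntu18); see the TODO in the
// command for making them configurable.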
2090func (b *jobBuilder) runWasmGMTests() { 2091 compileTaskName := b.compile() 2092 2093 b.addTask(b.Name, func(b *taskBuilder) { 2094 b.attempts(1) 2095 b.usesNode() 2096 b.swarmDimensions() 2097 b.cipd(CIPD_PKG_LUCI_AUTH) 2098 b.cipd(CIPD_PKGS_GOLDCTL) 2099 b.dep(b.buildTaskDrivers("linux", "amd64")) 2100 b.dep(compileTaskName) 2101 b.timeout(60 * time.Minute) 2102 b.cas(CAS_WASM_GM) 2103 b.serviceAccount(b.cfg.ServiceAccountUploadGM) 2104 b.cmd( 2105 "./run_wasm_gm_tests", 2106 "--project_id", "skia-swarming-bots", 2107 "--task_id", specs.PLACEHOLDER_TASK_ID, 2108 "--task_name", b.Name, 2109 "--test_harness_path", "./tools/run-wasm-gm-tests", 2110 "--built_path", "./wasm_out", 2111 "--node_bin_path", "./node/node/bin", 2112 "--resource_path", "./resources", 2113 "--work_path", "./wasm_gm/work", 2114 "--gold_ctl_path", "./cipd_bin_packages/goldctl", 2115 "--gold_hashes_url", b.cfg.GoldHashesURL, 2116 "--git_commit", specs.PLACEHOLDER_REVISION, 2117 "--changelist_id", specs.PLACEHOLDER_ISSUE, 2118 "--patchset_order", specs.PLACEHOLDER_PATCHSET, 2119 "--tryjob_id", specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID, 2120 // TODO(kjlubick, nifong) Make these not hard coded if we change the configs we test on. 2121 "--webgl_version", "2", // 0 means CPU ; this flag controls cpu_or_gpu and extra_config 2122 "--gold_key", "alpha_type:Premul", 2123 "--gold_key", "arch:wasm", 2124 "--gold_key", "browser:Chrome", 2125 "--gold_key", "color_depth:8888", 2126 "--gold_key", "config:gles", 2127 "--gold_key", "configuration:Release", 2128 "--gold_key", "cpu_or_gpu_value:QuadroP400", 2129 "--gold_key", "model:Golo", 2130 "--gold_key", "os:Ubuntu18", 2131 ) 2132 }) 2133} 2134 2135// Maps a shorthand version of a label (which can be an arbitrary string) to an absolute Bazel 2136// label or "target pattern" https://bazel.build/docs/build#specifying-build-targets 2137// The reason we need this mapping is because Buildbucket build names cannot have / or : in them. 2138var shorthandToLabel = map[string]string{ 2139 "base": "//src:base", 2140 "example_hello_world_dawn": "//example:hello_world_dawn", 2141 "example_hello_world_gl": "//example:hello_world_gl", 2142 "example_hello_world_vulkan": "//example:hello_world_vulkan", 2143 "modules_canvaskit": "//modules/canvaskit:canvaskit", 2144 "skia_public": "//:skia_public", 2145 "skottie_tool_gpu": "//modules/skottie:skottie_tool_gpu", 2146 "tests": "//tests/...", 2147} 2148 2149// bazelBuild adds a task which builds the specified single-target label (//foo:bar) or 2150// multi-target label (//foo/...) using Bazel. Depending on the host we run this on, we may 2151// specify additional Bazel args to build faster. 
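// For example, a job whose shorthand part is "modules_canvaskit" builds the label
// "//modules/canvaskit:canvaskit" via the shorthandToLabel map above, since Buildbucket build
// names cannot contain '/' or ':'. The config, host, and cross parts come from
// parts.bazelBuildParts().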
2152func (b *jobBuilder) bazelBuild() { 2153 shorthand, config, host, cross := b.parts.bazelBuildParts() 2154 label, ok := shorthandToLabel[shorthand] 2155 if !ok { 2156 panic("unsupported Bazel label shorthand " + shorthand) 2157 } 2158 b.addTask(b.Name, func(b *taskBuilder) { 2159 cmd := []string{"bazel_build_task_driver/bazel_build", 2160 "--project_id=skia-swarming-bots", 2161 "--task_id=" + specs.PLACEHOLDER_TASK_ID, 2162 "--task_name=" + b.Name, 2163 "--label=" + label, 2164 "--config=" + config, 2165 "--workdir=.", 2166 } 2167 if cross != "" { 2168 // The cross (and host) platform is expected to be defined in 2169 // //bazel/common_config_settings/BUILD.bazel 2170 cross = "//bazel/common_config_settings:" + cross 2171 cmd = append(cmd, "--cross="+cross) 2172 } 2173 if host == "linux_x64" { 2174 b.linuxGceDimensions(MACHINE_TYPE_MEDIUM) 2175 // Use a built task_driver from CIPD instead of building it from scratch. The 2176 // task_driver should not need to change often, so using a CIPD version should reduce 2177 // build latency. 2178 // TODO(kjlubick) For now, this only has the linux version. We could build the task 2179 // driver for all hosts that we support running Bazel from in this CIPD package 2180 // if/when needed. 2181 b.cipd(b.MustGetCipdPackageFromAsset("bazel_build_task_driver")) 2182 2183 // We want all Linux Bazel Builds to use RBE 2184 cmd = append(cmd, "--bazel_arg=--config=for_linux_x64_with_rbe") 2185 cmd = append(cmd, "--bazel_arg=--jobs=100") 2186 cmd = append(cmd, "--bazel_arg=--remote_download_minimal") 2187 } else { 2188 panic("unsupported Bazel host " + host) 2189 } 2190 b.cmd(cmd...) 2191 2192 // TODO(kjlubick) I believe this bazelisk package is just the Linux one. To support 2193 // more hosts, we need to have platform-specific bazelisk binaries. 2194 b.cipd(b.MustGetCipdPackageFromAsset("bazelisk")) 2195 b.addToPATH("bazelisk") 2196 b.idempotent() 2197 b.cas(CAS_BAZEL) 2198 b.attempts(1) 2199 b.serviceAccount(b.cfg.ServiceAccountCompile) 2200 }) 2201} 2202 2203func (b *jobBuilder) bazelTest() { 2204 taskdriverName, config, host, cross := b.parts.bazelTestParts() 2205 2206 b.addTask(b.Name, func(b *taskBuilder) { 2207 cmd := []string{"./" + taskdriverName, 2208 "--project_id=skia-swarming-bots", 2209 "--task_id=" + specs.PLACEHOLDER_TASK_ID, 2210 "--task_name=" + b.Name, 2211 "--test_config=" + config, 2212 "--workdir=.", 2213 } 2214 2215 switch taskdriverName { 2216 case "canvaskit_gold": 2217 cmd = append(cmd, 2218 "--goldctl_path=./cipd_bin_packages/goldctl", 2219 "--git_commit="+specs.PLACEHOLDER_REVISION, 2220 "--changelist_id="+specs.PLACEHOLDER_ISSUE, 2221 "--patchset_order="+specs.PLACEHOLDER_PATCHSET, 2222 "--tryjob_id="+specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID, 2223 // It is unclear why this is needed, but it helps resolve issues like 2224 // Middleman ...tests-runfiles failed: missing input file 'external/npm/node_modules/karma-chrome-launcher/...' 
2225 "--expunge_cache") 2226 b.cipd(CIPD_PKGS_GOLDCTL) 2227 switch config { 2228 case "ck_full_cpu_release_chrome": 2229 cmd = append(cmd, "--cpu_or_gpu=CPU", "--cpu_or_gpu_value=CPU", 2230 "--compilation_mode=Release", "--browser=Chrome") 2231 case "ck_full_webgl2_release_chrome": 2232 cmd = append(cmd, "--cpu_or_gpu=GPU", "--cpu_or_gpu_value=WebGL2", 2233 "--compilation_mode=Release", "--browser=Chrome") 2234 default: 2235 panic("Gold keys not specified for config " + config) 2236 } 2237 case "cpu_tests": 2238 break 2239 case "toolchain_layering_check": 2240 break 2241 default: 2242 panic("Unsupported Bazel taskdriver " + taskdriverName) 2243 } 2244 2245 if cross != "" { 2246 // The cross (and host) platform is expected to be defined in 2247 // //bazel/common_config_settings/BUILD.bazel 2248 cross = "//bazel/common_config_settings:" + cross 2249 cmd = append(cmd, "--cross="+cross) 2250 } 2251 if host == "linux_x64" { 2252 b.linuxGceDimensions(MACHINE_TYPE_MEDIUM) 2253 b.dep(b.buildTaskDrivers("linux", "amd64")) 2254 } else { 2255 panic("unsupported Bazel host " + host) 2256 } 2257 b.cmd(cmd...) 2258 2259 // TODO(kjlubick) I believe this bazelisk package is just the Linux one. To support 2260 // more hosts, we need to have platform-specific bazelisk binaries. 2261 b.cipd(b.MustGetCipdPackageFromAsset("bazelisk")) 2262 b.addToPATH("bazelisk") 2263 b.idempotent() 2264 b.cas(CAS_BAZEL) 2265 b.attempts(1) 2266 b.serviceAccount(b.cfg.ServiceAccountCompile) 2267 }) 2268} 2269
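// As a sketch (hypothetical job; the real parts come from parts.bazelTestParts()), a job whose
// parts decode to taskdriverName="canvaskit_gold", config="ck_full_webgl2_release_chrome", and
// host="linux_x64" would run the canvaskit_gold task driver on a medium Linux GCE machine and
// report results to Gold with the GPU/WebGL2/Release/Chrome keys added above.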