• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1// Copyright 2016 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5package gen_tasks_logic
6
7/*
8	Generate the tasks.json file.
9*/
10
11import (
12	"encoding/json"
13	"fmt"
14	"io/ioutil"
15	"os"
16	"path/filepath"
17	"regexp"
18	"runtime"
19	"sort"
20	"strconv"
21	"strings"
22	"time"
23
24	"github.com/golang/glog"
25	"go.skia.org/infra/go/sklog"
26	"go.skia.org/infra/go/util"
27	"go.skia.org/infra/task_scheduler/go/specs"
28)
29
const (
	// Task names for per-commit housekeeping tasks.
	BUILD_TASK_DRIVERS_NAME    = "Housekeeper-PerCommit-BuildTaskDrivers"
	BUNDLE_RECIPES_NAME        = "Housekeeper-PerCommit-BundleRecipes"
	ISOLATE_GCLOUD_LINUX_NAME  = "Housekeeper-PerCommit-IsolateGCloudLinux"
	ISOLATE_SKIMAGE_NAME       = "Housekeeper-PerCommit-IsolateSkImage"
	ISOLATE_SKP_NAME           = "Housekeeper-PerCommit-IsolateSKP"
	ISOLATE_MSKP_NAME          = "Housekeeper-PerCommit-IsolateMSKP"
	ISOLATE_SVG_NAME           = "Housekeeper-PerCommit-IsolateSVG"
	ISOLATE_NDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidNDKLinux"
	ISOLATE_SDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidSDKLinux"
	ISOLATE_WIN_TOOLCHAIN_NAME = "Housekeeper-PerCommit-IsolateWinToolchain"

	// Default Swarming OS dimension values.
	DEFAULT_OS_DEBIAN    = "Debian-9.4"
	DEFAULT_OS_LINUX_GCE = "Debian-9.8"
	DEFAULT_OS_MAC       = "Mac-10.13.6"
	DEFAULT_OS_WIN       = "Windows-Server-14393"

	// Small is a 2-core machine.
	// TODO(dogben): Would n1-standard-1 or n1-standard-2 be sufficient?
	MACHINE_TYPE_SMALL = "n1-highmem-2"
	// Medium is a 16-core machine
	MACHINE_TYPE_MEDIUM = "n1-standard-16"
	// Large is a 64-core machine. (We use "highcpu" because we don't need more than 57GB memory for
	// any of our tasks.)
	MACHINE_TYPE_LARGE = "n1-highcpu-64"

	// Swarming output dirs.
	OUTPUT_NONE  = "output_ignored" // This will result in outputs not being isolated.
	OUTPUT_BUILD = "build"
	OUTPUT_TEST  = "test"
	OUTPUT_PERF  = "perf"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"
)
65
66var (
67	// "Constants"
68
69	// Named caches used by tasks.
70	CACHES_GIT = []*specs.Cache{
71		&specs.Cache{
72			Name: "git",
73			Path: "cache/git",
74		},
75		&specs.Cache{
76			Name: "git_cache",
77			Path: "cache/git_cache",
78		},
79	}
80	CACHES_GO = []*specs.Cache{
81		&specs.Cache{
82			Name: "go_cache",
83			Path: "cache/go_cache",
84		},
85		&specs.Cache{
86			Name: "gopath",
87			Path: "cache/gopath",
88		},
89	}
90	CACHES_WORKDIR = []*specs.Cache{
91		&specs.Cache{
92			Name: "work",
93			Path: "cache/work",
94		},
95	}
96	CACHES_DOCKER = []*specs.Cache{
97		&specs.Cache{
98			Name: "docker",
99			Path: "cache/docker",
100		},
101	}
102	// Versions of the following copied from
103	// https://chrome-internal.googlesource.com/infradata/config/+/master/configs/cr-buildbucket/swarming_task_template_canary.json#42
104	// to test the fix for chromium:836196.
105	// (In the future we may want to use versions from
106	// https://chrome-internal.googlesource.com/infradata/config/+/master/configs/cr-buildbucket/swarming_task_template.json#42)
107	// TODO(borenet): Roll these versions automatically!
108	CIPD_PKGS_PYTHON = []*specs.CipdPackage{
109		&specs.CipdPackage{
110			Name:    "infra/tools/luci/vpython/${platform}",
111			Path:    "cipd_bin_packages",
112			Version: "git_revision:f96db4b66034c859090be3c47eb38227277f228b",
113		},
114	}
115
116	CIPD_PKGS_CPYTHON = []*specs.CipdPackage{
117		&specs.CipdPackage{
118			Name:    "infra/python/cpython/${platform}",
119			Path:    "cipd_bin_packages",
120			Version: "version:2.7.15.chromium14",
121		},
122	}
123
124	CIPD_PKGS_KITCHEN = append([]*specs.CipdPackage{
125		&specs.CipdPackage{
126			Name:    "infra/tools/luci/kitchen/${platform}",
127			Path:    ".",
128			Version: "git_revision:d8f38ca9494b5af249942631f9cee45927f6b4bc",
129		},
130		&specs.CipdPackage{
131			Name:    "infra/tools/luci-auth/${platform}",
132			Path:    "cipd_bin_packages",
133			Version: "git_revision:2c805f1c716f6c5ad2126b27ec88b8585a09481e",
134		},
135	}, CIPD_PKGS_PYTHON...)
136
137	CIPD_PKGS_GIT = []*specs.CipdPackage{
138		&specs.CipdPackage{
139			Name:    "infra/git/${platform}",
140			Path:    "cipd_bin_packages",
141			Version: "version:2.17.1.chromium15",
142		},
143		&specs.CipdPackage{
144			Name:    "infra/tools/git/${platform}",
145			Path:    "cipd_bin_packages",
146			Version: "git_revision:c9c8a52bfeaf8bc00ece22fdfd447822c8fcad77",
147		},
148		&specs.CipdPackage{
149			Name:    "infra/tools/luci/git-credential-luci/${platform}",
150			Path:    "cipd_bin_packages",
151			Version: "git_revision:2c805f1c716f6c5ad2126b27ec88b8585a09481e",
152		},
153	}
154
155	CIPD_PKGS_GSUTIL = []*specs.CipdPackage{
156		&specs.CipdPackage{
157			Name:    "infra/gsutil",
158			Path:    "cipd_bin_packages",
159			Version: "version:4.28",
160		},
161	}
162
163	CIPD_PKGS_XCODE = []*specs.CipdPackage{
164		// https://chromium.googlesource.com/chromium/tools/build/+/e19b7d9390e2bb438b566515b141ed2b9ed2c7c2/scripts/slave/recipe_modules/ios/api.py#317
165		// This package is really just an installer for XCode.
166		&specs.CipdPackage{
167			Name: "infra/tools/mac_toolchain/${platform}",
168			Path: "mac_toolchain",
169			// When this is updated, also update
170			// https://skia.googlesource.com/skcms.git/+/f1e2b45d18facbae2dece3aca673fe1603077846/infra/bots/gen_tasks.go#56
171			Version: "git_revision:796d2b92cff93fc2059623ce0a66284373ceea0a",
172		},
173	}
174
175	// These properties are required by some tasks, eg. for running
176	// bot_update, but they prevent de-duplication, so they should only be
177	// used where necessary.
178	EXTRA_PROPS = map[string]string{
179		"buildbucket_build_id": specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID,
180		"patch_issue":          specs.PLACEHOLDER_ISSUE,
181		"patch_ref":            specs.PLACEHOLDER_PATCH_REF,
182		"patch_repo":           specs.PLACEHOLDER_PATCH_REPO,
183		"patch_set":            specs.PLACEHOLDER_PATCHSET,
184		"patch_storage":        specs.PLACEHOLDER_PATCH_STORAGE,
185		"repository":           specs.PLACEHOLDER_REPO,
186		"revision":             specs.PLACEHOLDER_REVISION,
187		"task_id":              specs.PLACEHOLDER_TASK_ID,
188	}
189
190	// ISOLATE_ASSET_MAPPING maps the name of a task to the configuration
191	// for how the CIPD package should be installed for that task, in order
192	// for it to be uploaded to the isolate server.
193	ISOLATE_ASSET_MAPPING = map[string]isolateAssetCfg{
194		ISOLATE_GCLOUD_LINUX_NAME: {
195			cipdPkg: "gcloud_linux",
196			path:    "gcloud_linux",
197		},
198		ISOLATE_SKIMAGE_NAME: {
199			cipdPkg: "skimage",
200			path:    "skimage",
201		},
202		ISOLATE_SKP_NAME: {
203			cipdPkg: "skp",
204			path:    "skp",
205		},
206		ISOLATE_SVG_NAME: {
207			cipdPkg: "svg",
208			path:    "svg",
209		},
210		ISOLATE_MSKP_NAME: {
211			cipdPkg: "mskp",
212			path:    "mskp",
213		},
214		ISOLATE_NDK_LINUX_NAME: {
215			cipdPkg: "android_ndk_linux",
216			path:    "android_ndk_linux",
217		},
218		ISOLATE_SDK_LINUX_NAME: {
219			cipdPkg: "android_sdk_linux",
220			path:    "android_sdk_linux",
221		},
222		ISOLATE_WIN_TOOLCHAIN_NAME: {
223			cipdPkg: "win_toolchain",
224			path:    "win_toolchain",
225		},
226	}
227
228	// BUILD_STATS_NO_UPLOAD indicates which BuildStats tasks should not
229	// have their results uploaded.
230	BUILD_STATS_NO_UPLOAD = []string{"BuildStats-Debian9-Clang-x86_64-Release"}
231)
232
// Config contains general configuration information used when generating
// tasks.json. It is normally loaded from a cfg.json file next to the calling
// gen_tasks.go file; fields tagged `json:"-"` are optional Go hooks which
// cannot be expressed in JSON.
type Config struct {
	// Directory containing assets. Assumed to be relative to the directory
	// which contains the calling gen_tasks.go file. If not specified, uses
	// the infra/bots/assets from this repo.
	AssetsDir string `json:"assets_dir"`

	// Path to the builder name schema JSON file. Assumed to be relative to
	// the directory which contains the calling gen_tasks.go file. If not
	// specified, uses infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json
	// from this repo.
	BuilderNameSchemaFile string `json:"builder_name_schema"`

	// URL of the Skia Gold known hashes endpoint.
	GoldHashesURL string `json:"gold_hashes_url"`

	// GCS bucket used for Calmbench results.
	GsBucketCalm string `json:"gs_bucket_calm"`

	// GCS bucket used for GM results.
	GsBucketGm string `json:"gs_bucket_gm"`

	// GCS bucket used for Nanobench results.
	GsBucketNano string `json:"gs_bucket_nano"`

	// Optional function which returns a bot ID for internal devices.
	// Not serialized to/from JSON.
	InternalHardwareLabel func(parts map[string]string) *int `json:"-"`

	// List of task names for which we'll never upload results.
	NoUpload []string `json:"no_upload"`

	// Swarming pool used for triggering tasks.
	Pool string `json:"pool"`

	// LUCI project associated with this repo.
	Project string `json:"project"`

	// Service accounts used to run the various types of tasks.
	ServiceAccountCompile         string `json:"service_account_compile"`
	ServiceAccountHousekeeper     string `json:"service_account_housekeeper"`
	ServiceAccountRecreateSKPs    string `json:"service_account_recreate_skps"`
	ServiceAccountUploadBinary    string `json:"service_account_upload_binary"`
	ServiceAccountUploadCalmbench string `json:"service_account_upload_calmbench"`
	ServiceAccountUploadGM        string `json:"service_account_upload_gm"`
	ServiceAccountUploadNano      string `json:"service_account_upload_nano"`

	// Optional override function which derives Swarming bot dimensions
	// from parts of task names. Not serialized to/from JSON. If it returns
	// nil for a given task, the default dimensions are used.
	SwarmDimensions func(parts map[string]string) []string `json:"-"`
}
283
284// LoadConfig loads the Config from a cfg.json file which is the sibling of the
285// calling gen_tasks.go file.
286func LoadConfig() *Config {
287	cfgDir := getCallingDirName()
288	var cfg Config
289	LoadJson(filepath.Join(cfgDir, "cfg.json"), &cfg)
290	return &cfg
291}
292
293// CheckoutRoot is a wrapper around specs.GetCheckoutRoot which prevents the
294// caller from needing a dependency on the specs package.
295func CheckoutRoot() string {
296	root, err := specs.GetCheckoutRoot()
297	if err != nil {
298		glog.Fatal(err)
299	}
300	return root
301}
302
303// LoadJson loads JSON from the given file and unmarshals it into the given
304// destination.
305func LoadJson(filename string, dest interface{}) {
306	b, err := ioutil.ReadFile(filename)
307	if err != nil {
308		glog.Fatal(err)
309	}
310	if err := json.Unmarshal(b, dest); err != nil {
311		glog.Fatal(err)
312	}
313}
314
// GenTasks regenerates the tasks.json file. Loads the job list from a jobs.json
// file which is the sibling of the calling gen_tasks.go file. If cfg is nil, it
// is similarly loaded from a cfg.json file which is the sibling of the calling
// gen_tasks.go file.
//
// NOTE: this function must be called directly from gen_tasks.go, because
// getCallingDirName assumes the calling gen_tasks.go file is exactly two
// stack frames up.
func GenTasks(cfg *Config) {
	b := specs.MustNewTasksCfgBuilder()

	// Find the paths to the infra/bots directories in this repo and the
	// repo of the calling file.
	relpathTargetDir := getThisDirName()
	relpathBaseDir := getCallingDirName()

	// The job list always comes from the calling repo.
	var jobs []string
	LoadJson(filepath.Join(relpathBaseDir, "jobs.json"), &jobs)

	// Fall back to the sibling cfg.json if no Config was passed in.
	if cfg == nil {
		cfg = new(Config)
		LoadJson(filepath.Join(relpathBaseDir, "cfg.json"), cfg)
	}

	// Create the JobNameSchema. The schema file defaults to the one in this
	// repo but may be overridden by the config.
	builderNameSchemaFile := filepath.Join(relpathTargetDir, "recipe_modules", "builder_name_schema", "builder_name_schema.json")
	if cfg.BuilderNameSchemaFile != "" {
		builderNameSchemaFile = filepath.Join(relpathBaseDir, cfg.BuilderNameSchemaFile)
	}
	schema, err := NewJobNameSchema(builderNameSchemaFile)
	if err != nil {
		glog.Fatal(err)
	}

	// Set the assets dir. Defaults to infra/bots/assets in this repo but
	// may be overridden by the config.
	assetsDir := filepath.Join(relpathTargetDir, "assets")
	if cfg.AssetsDir != "" {
		assetsDir = filepath.Join(relpathBaseDir, cfg.AssetsDir)
	}
	b.SetAssetsDir(assetsDir)

	// Create Tasks and Jobs.
	builder := &builder{
		TasksCfgBuilder:  b,
		cfg:              cfg,
		jobNameSchema:    schema,
		jobs:             jobs,
		relpathBaseDir:   relpathBaseDir,
		relpathTargetDir: relpathTargetDir,
	}
	for _, name := range jobs {
		builder.process(name)
	}
	builder.MustFinish()
}
366
367// getThisDirName returns the infra/bots directory which is an ancestor of this
368// file.
369func getThisDirName() string {
370	_, thisFileName, _, ok := runtime.Caller(0)
371	if !ok {
372		sklog.Fatal("Unable to find path to current file.")
373	}
374	return filepath.Dir(filepath.Dir(thisFileName))
375}
376
377// getCallingDirName returns the infra/bots directory which is an ancestor of
378// the calling gen_tasks.go file. WARNING: assumes that the calling gen_tasks.go
379// file appears two steps up the stack; do not call from a function which is not
380// directly called by gen_tasks.go.
381func getCallingDirName() string {
382	_, callingFileName, _, ok := runtime.Caller(2)
383	if !ok {
384		sklog.Fatal("Unable to find path to calling file.")
385	}
386	return filepath.Dir(callingFileName)
387}
388
// builder is a wrapper for specs.TasksCfgBuilder which carries the state
// needed while generating tasks and jobs.
type builder struct {
	*specs.TasksCfgBuilder
	// cfg is the general configuration for task generation.
	cfg              *Config
	// jobNameSchema is used to assemble job names (see MakeJobName).
	jobNameSchema    *JobNameSchema
	// jobs is the list of job names to generate.
	jobs             []string
	// relpathBaseDir is the directory containing the calling gen_tasks.go.
	relpathBaseDir   string
	// relpathTargetDir is the infra/bots directory in this repo.
	relpathTargetDir string
}
398
399// logdogAnnotationUrl builds the LogDog annotation URL.
400func (b *builder) logdogAnnotationUrl() string {
401	return fmt.Sprintf("logdog://logs.chromium.org/%s/${SWARMING_TASK_ID}/+/annotations", b.cfg.Project)
402}
403
404// props creates a properties JSON string.
405func props(p map[string]string) string {
406	d := make(map[string]interface{}, len(p)+1)
407	for k, v := range p {
408		d[k] = interface{}(v)
409	}
410	d["$kitchen"] = struct {
411		DevShell bool `json:"devshell"`
412		GitAuth  bool `json:"git_auth"`
413	}{
414		DevShell: true,
415		GitAuth:  true,
416	}
417
418	j, err := json.Marshal(d)
419	if err != nil {
420		sklog.Fatal(err)
421	}
422	return strings.Replace(string(j), "\\u003c", "<", -1)
423}
424
425// kitchenTask returns a specs.TaskSpec instance which uses Kitchen to run a
426// recipe.
427func (b *builder) kitchenTask(name, recipe, isolate, serviceAccount string, dimensions []string, extraProps map[string]string, outputDir string) *specs.TaskSpec {
428	cipd := append([]*specs.CipdPackage{}, CIPD_PKGS_KITCHEN...)
429	if strings.Contains(name, "Win") && !strings.Contains(name, "LenovoYogaC630") {
430		cipd = append(cipd, CIPD_PKGS_CPYTHON...)
431	} else if strings.Contains(name, "P30") {
432		cipd = append(cipd, CIPD_PKGS_CPYTHON...)
433	}
434	properties := map[string]string{
435		"buildername":   name,
436		"swarm_out_dir": outputDir,
437	}
438	for k, v := range extraProps {
439		properties[k] = v
440	}
441	var outputs []string = nil
442	if outputDir != OUTPUT_NONE {
443		outputs = []string{outputDir}
444	}
445	python := "cipd_bin_packages/vpython${EXECUTABLE_SUFFIX}"
446	task := &specs.TaskSpec{
447		Caches: []*specs.Cache{
448			&specs.Cache{
449				Name: "vpython",
450				Path: "cache/vpython",
451			},
452		},
453		CipdPackages: cipd,
454		Command:      []string{python, "skia/infra/bots/run_recipe.py", "${ISOLATED_OUTDIR}", recipe, props(properties), b.cfg.Project},
455		Dependencies: []string{BUNDLE_RECIPES_NAME},
456		Dimensions:   dimensions,
457		EnvPrefixes: map[string][]string{
458			"PATH":                    []string{"cipd_bin_packages", "cipd_bin_packages/bin"},
459			"VPYTHON_VIRTUALENV_ROOT": []string{"cache/vpython"},
460		},
461		ExtraTags: map[string]string{
462			"log_location": b.logdogAnnotationUrl(),
463		},
464		Isolate:        b.relpath(isolate),
465		MaxAttempts:    attempts(name),
466		Outputs:        outputs,
467		ServiceAccount: serviceAccount,
468	}
469	timeout(task, time.Hour)
470	return task
471}
472
473// internalHardwareLabel returns the internal ID for the bot, if any.
474func (b *builder) internalHardwareLabel(parts map[string]string) *int {
475	if b.cfg.InternalHardwareLabel != nil {
476		return b.cfg.InternalHardwareLabel(parts)
477	}
478	return nil
479}
480
481// linuxGceDimensions are the Swarming bot dimensions for Linux GCE instances.
482func (b *builder) linuxGceDimensions(machineType string) []string {
483	return []string{
484		// Specify CPU to avoid running builds on bots with a more unique CPU.
485		"cpu:x86-64-Haswell_GCE",
486		"gpu:none",
487		// Currently all Linux GCE tasks run on 16-CPU machines.
488		fmt.Sprintf("machine_type:%s", machineType),
489		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
490		fmt.Sprintf("pool:%s", b.cfg.Pool),
491	}
492}
493
494// dockerGceDimensions are the Swarming bot dimensions for Linux GCE instances
495// which have Docker installed.
496func (b *builder) dockerGceDimensions() []string {
497	// There's limited parallelism for WASM builds, so we can get away with the medium
498	// instance instead of the beefy large instance.
499	// Docker being installed is the most important part.
500	return append(b.linuxGceDimensions(MACHINE_TYPE_MEDIUM), "docker_installed:true")
501}
502
// deriveCompileTaskName returns the name of a compile task based on the given
// job name. Test/Perf/Calmbench jobs are mapped to the Build job which
// produces their binaries; BuildStats jobs map directly onto Build jobs; any
// other job name is returned unchanged.
func (b *builder) deriveCompileTaskName(jobName string, parts map[string]string) string {
	if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
		task_os := parts["os"]
		ec := []string{}
		if val := parts["extra_config"]; val != "" {
			ec = strings.Split(val, "_")
			// Extra configs in this list are dropped when deriving
			// the compile task name.
			ignore := []string{
				"Skpbench", "AbandonGpuContext", "PreAbandonGpuContext", "Valgrind",
				"ReleaseAndAbandonGpuContext", "CCPR", "FSAA", "FAAA", "FDAA", "NativeFonts", "GDI",
				"NoGPUThreads", "ProcDump", "DDL1", "DDL3", "T8888", "DDLTotal", "DDLRecord", "9x9",
				"BonusConfigs", "SkottieTracing", "SkottieWASM", "NonNVPR", "Mskp"}
			keep := make([]string, 0, len(ec))
			for _, part := range ec {
				if !util.In(part, ignore) {
					keep = append(keep, part)
				}
			}
			ec = keep
		}
		// Map the OS the task runs on to the OS the build runs on,
		// prepending extra configs where the target differs from the
		// build platform (e.g. Android binaries are built on Debian).
		if task_os == "Android" {
			if !util.In("Android", ec) {
				ec = append([]string{"Android"}, ec...)
			}
			task_os = "Debian9"
		} else if task_os == "Chromecast" {
			task_os = "Debian9"
			ec = append([]string{"Chromecast"}, ec...)
		} else if strings.Contains(task_os, "ChromeOS") {
			ec = append([]string{"Chromebook", "GLES"}, ec...)
			task_os = "Debian9"
		} else if task_os == "iOS" {
			ec = append([]string{task_os}, ec...)
			task_os = "Mac"
		} else if strings.Contains(task_os, "Win") {
			task_os = "Win"
		} else if strings.Contains(task_os, "Ubuntu") || strings.Contains(task_os, "Debian") {
			task_os = "Debian9"
		} else if strings.Contains(task_os, "Mac") {
			task_os = "Mac"
		}
		jobNameMap := map[string]string{
			"role":          "Build",
			"os":            task_os,
			"compiler":      parts["compiler"],
			"target_arch":   parts["arch"],
			"configuration": parts["configuration"],
		}
		// PathKit/CanvasKit/SkottieWASM jobs share dedicated builds,
		// so their extra configs are replaced wholesale.
		if strings.Contains(jobName, "PathKit") {
			ec = []string{"PathKit"}
		}
		if strings.Contains(jobName, "CanvasKit") || strings.Contains(jobName, "SkottieWASM") {
			if parts["cpu_or_gpu"] == "CPU" {
				ec = []string{"CanvasKit_CPU"}
			} else {
				ec = []string{"CanvasKit"}
			}

		}
		if len(ec) > 0 {
			jobNameMap["extra_config"] = strings.Join(ec, "_")
		}
		name, err := b.jobNameSchema.MakeJobName(jobNameMap)
		if err != nil {
			glog.Fatal(err)
		}
		return name
	} else if parts["role"] == "BuildStats" {
		return strings.Replace(jobName, "BuildStats", "Build", 1)
	} else {
		return jobName
	}
}
577
578// swarmDimensions generates swarming bot dimensions for the given task.
579func (b *builder) swarmDimensions(parts map[string]string) []string {
580	if b.cfg.SwarmDimensions != nil {
581		dims := b.cfg.SwarmDimensions(parts)
582		if dims != nil {
583			return dims
584		}
585	}
586	return b.defaultSwarmDimensions(parts)
587}
588
// defaultSwarmDimensions generates default swarming bot dimensions for the
// given task, based on its name parts (os, model, cpu_or_gpu, etc). Exits
// fatally when a part value has no known mapping. The result is a sorted
// list of "key:value" strings.
func (b *builder) defaultSwarmDimensions(parts map[string]string) []string {
	d := map[string]string{
		"pool": b.cfg.Pool,
	}
	// Map the task's OS part onto a Swarming "os" dimension value.
	if os, ok := parts["os"]; ok {
		d["os"], ok = map[string]string{
			"Android":    "Android",
			"Chromecast": "Android",
			"ChromeOS":   "ChromeOS",
			"Debian9":    DEFAULT_OS_DEBIAN,
			"Mac":        DEFAULT_OS_MAC,
			"Mac10.13":   DEFAULT_OS_MAC,
			"Mac10.14":   "Mac-10.14.3",
			"Ubuntu18":   "Ubuntu-18.04",
			"Win":        DEFAULT_OS_WIN,
			"Win10":      "Windows-10-18362",
			"Win2016":    DEFAULT_OS_WIN,
			"Win7":       "Windows-7-SP1",
			"Win8":       "Windows-8.1-SP0",
			"iOS":        "iOS-11.4.1",
		}[os]
		if !ok {
			glog.Fatalf("Entry %q not found in OS mapping.", os)
		}
		// Model-specific OS overrides.
		if os == "Win10" && parts["model"] == "Golo" {
			// ChOps-owned machines have Windows 10 v1709.
			d["os"] = "Windows-10-16299"
		}
		if os == "Mac10.14" && parts["model"] == "VMware7.1" {
			// ChOps VMs are at a newer version of MacOS.
			d["os"] = "Mac-10.14.4"
		}
		if d["os"] == DEFAULT_OS_WIN {
			// Upgrades result in a new image but not a new OS version.
			d["image"] = "windows-server-2016-dc-v20190108"
		}
		if parts["model"] == "LenovoYogaC630" {
			// This is currently a unique snowflake.
			d["os"] = "Windows-10"
		}
	} else {
		d["os"] = DEFAULT_OS_DEBIAN
	}
	// Test/Perf/Calmbench tasks run on the target hardware; everything else
	// (handled in the trailing else) runs on CPU-only GCE or lab machines.
	if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
		if strings.Contains(parts["os"], "Android") || strings.Contains(parts["os"], "Chromecast") {
			// For Android, the device type is a better dimension
			// than CPU or GPU. Values are {device_type, device_os}.
			deviceInfo, ok := map[string][]string{
				"AndroidOne":      {"sprout", "MOB30Q"},
				"Chorizo":         {"chorizo", "1.30_109591"},
				"GalaxyS6":        {"zerofltetmo", "NRD90M_G920TUVS6FRC1"},
				"GalaxyS7_G930FD": {"herolte", "R16NW_G930FXXS2ERH6"}, // This is Oreo.
				"GalaxyS9":        {"starlte", "R16NW_G960FXXU2BRJ8"}, // This is Oreo.
				"MotoG4":          {"athene", "NPJS25.93-14.7-8"},
				"NVIDIA_Shield":   {"foster", "OPR6.170623.010_3507953_1441.7411"},
				"Nexus5":          {"hammerhead", "M4B30Z_3437181"},
				"Nexus5x":         {"bullhead", "OPR6.170623.023"},
				"Nexus7":          {"grouper", "LMY47V_1836172"}, // 2012 Nexus 7
				"P30":             {"HWELE", "HUAWEIELE-L29"},
				"Pixel":           {"sailfish", "PPR1.180610.009"},
				"Pixel2XL":        {"taimen", "PPR1.180610.009"},
				"Pixel3":          {"blueline", "PQ1A.190105.004"},
				"TecnoSpark3Pro":  {"TECNO-KB8", "PPR1.180610.011"},
			}[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in Android mapping.", parts["model"])
			}
			d["device_type"] = deviceInfo[0]
			d["device_os"] = deviceInfo[1]
		} else if strings.Contains(parts["os"], "iOS") {
			device, ok := map[string]string{
				"iPadMini4": "iPad5,1",
				"iPhone6":   "iPhone7,2",
				"iPhone7":   "iPhone9,1",
				"iPadPro":   "iPad6,3",
			}[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in iOS mapping.", parts["model"])
			}
			d["device"] = device
		} else if strings.Contains(parts["extra_config"], "SwiftShader") {
			// SwiftShader tasks only run on one specific GCE config.
			if parts["model"] != "GCE" || d["os"] != DEFAULT_OS_DEBIAN || parts["cpu_or_gpu_value"] != "SwiftShader" {
				glog.Fatalf("Please update defaultSwarmDimensions for SwiftShader %s %s %s.", parts["os"], parts["model"], parts["cpu_or_gpu_value"])
			}
			d["cpu"] = "x86-64-Haswell_GCE"
			d["os"] = DEFAULT_OS_LINUX_GCE
			d["machine_type"] = MACHINE_TYPE_SMALL
		} else if strings.Contains(parts["extra_config"], "SKQP") && parts["cpu_or_gpu_value"] == "Emulator" {
			if parts["model"] != "NUC7i5BNK" || d["os"] != DEFAULT_OS_DEBIAN {
				glog.Fatalf("Please update defaultSwarmDimensions for SKQP::Emulator %s %s.", parts["os"], parts["model"])
			}
			d["cpu"] = "x86-64-i5-7260U"
			d["os"] = DEFAULT_OS_DEBIAN
			// KVM means Kernel-based Virtual Machine, that is, can this vm virtualize commands
			// For us, this means, can we run an x86 android emulator on it.
			// kjlubick tried running this on GCE, but it was a bit too slow on the large install.
			// So, we run on bare metal machines in the Skolo (that should also have KVM).
			d["kvm"] = "1"
			d["docker_installed"] = "true"
		} else if parts["cpu_or_gpu"] == "CPU" {
			// CPU tasks: map cpu_or_gpu_value + model onto a "cpu" dimension.
			modelMapping, ok := map[string]map[string]string{
				"AVX": {
					"Golo":      "x86-64-E5-2670",
					"VMware7.1": "x86-64-E5-2697_v2",
				},
				"AVX2": {
					"GCE":            "x86-64-Haswell_GCE",
					"MacBookAir7.2":  "x86-64-i5-5350U",
					"MacBookPro11.5": "x86-64-i7-4870HQ",
					"NUC5i7RYH":      "x86-64-i7-5557U",
				},
				"AVX512": {
					"GCE": "x86-64-Skylake_GCE",
				},
				"Snapdragon850": {
					"LenovoYogaC630": "arm64-64-Snapdragon850",
				},
			}[parts["cpu_or_gpu_value"]]
			if !ok {
				glog.Fatalf("Entry %q not found in CPU mapping.", parts["cpu_or_gpu_value"])
			}
			cpu, ok := modelMapping[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in %q model mapping.", parts["model"], parts["cpu_or_gpu_value"])
			}
			d["cpu"] = cpu
			if parts["model"] == "GCE" && d["os"] == DEFAULT_OS_DEBIAN {
				d["os"] = DEFAULT_OS_LINUX_GCE
			}
			if parts["model"] == "GCE" && d["cpu"] == "x86-64-Haswell_GCE" {
				d["machine_type"] = MACHINE_TYPE_MEDIUM
			}
		} else {
			// GPU tasks: map cpu_or_gpu_value onto a per-OS "gpu" dimension.
			if strings.Contains(parts["extra_config"], "CanvasKit") {
				// GPU is defined for the WebGL version of CanvasKit, but
				// it can still run on a GCE instance.
				return b.dockerGceDimensions()
			} else if strings.Contains(parts["os"], "Win") {
				gpu, ok := map[string]string{
					// At some point this might use the device ID, but for now it's like Chromebooks.
					"Adreno630":     "Adreno630",
					"GT610":         "10de:104a-23.21.13.9101",
					"GTX660":        "10de:11c0-25.21.14.1634",
					"GTX960":        "10de:1401-25.21.14.1634",
					"IntelHD4400":   "8086:0a16-20.19.15.4963",
					"IntelIris540":  "8086:1926-25.20.100.6519",
					"IntelIris6100": "8086:162b-20.19.15.4963",
					"IntelIris655":  "8086:3ea5-25.20.100.6519",
					"RadeonHD7770":  "1002:683d-24.20.13001.1010",
					"RadeonR9M470X": "1002:6646-24.20.13001.1010",
					"QuadroP400":    "10de:1cb3-25.21.14.1678",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Win GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
			} else if strings.Contains(parts["os"], "Ubuntu") || strings.Contains(parts["os"], "Debian") {
				gpu, ok := map[string]string{
					// Intel drivers come from CIPD, so no need to specify the version here.
					"IntelBayTrail": "8086:0f31",
					"IntelHD2000":   "8086:0102",
					"IntelHD405":    "8086:22b1",
					"IntelIris640":  "8086:5926",
					"QuadroP400":    "10de:1cb3-430.14",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Ubuntu GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
			} else if strings.Contains(parts["os"], "Mac") {
				gpu, ok := map[string]string{
					"IntelHD6000":   "8086:1626",
					"IntelHD615":    "8086:591e",
					"IntelIris5100": "8086:0a2e",
					"RadeonHD8870M": "1002:6821-4.0.20-3.2.8",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Mac GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
				// Yuck. We have two different types of MacMini7,1 with the same GPU but different CPUs.
				if parts["cpu_or_gpu_value"] == "IntelIris5100" {
					// Run all tasks on Golo machines for now.
					d["cpu"] = "x86-64-i7-4578U"
				}
			} else if strings.Contains(parts["os"], "ChromeOS") {
				version, ok := map[string]string{
					"MaliT604":           "10575.22.0",
					"MaliT764":           "10575.22.0",
					"MaliT860":           "10575.22.0",
					"PowerVRGX6250":      "10575.22.0",
					"TegraK1":            "10575.22.0",
					"IntelHDGraphics615": "10575.22.0",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in ChromeOS GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = parts["cpu_or_gpu_value"]
				d["release_version"] = version
			} else {
				glog.Fatalf("Unknown GPU mapping for OS %q.", parts["os"])
			}
		}
	} else {
		// Non-Test/Perf/Calmbench tasks (e.g. Build) run on CPU-only bots.
		d["gpu"] = "none"
		if d["os"] == DEFAULT_OS_DEBIAN {
			if strings.Contains(parts["extra_config"], "PathKit") || strings.Contains(parts["extra_config"], "CanvasKit") || strings.Contains(parts["extra_config"], "CMake") {
				return b.dockerGceDimensions()
			}
			if parts["role"] == "BuildStats" {
				// Doesn't require a lot of resources, but some steps require docker
				return b.dockerGceDimensions()
			}
			// Use many-core machines for Build tasks.
			return b.linuxGceDimensions(MACHINE_TYPE_LARGE)
		} else if d["os"] == DEFAULT_OS_WIN {
			// Windows CPU bots.
			d["cpu"] = "x86-64-Haswell_GCE"
			// Use many-core machines for Build tasks.
			d["machine_type"] = MACHINE_TYPE_LARGE
		} else if d["os"] == DEFAULT_OS_MAC {
			// Mac CPU bots.
			d["cpu"] = "x86-64-E5-2697_v2"
		}
	}

	// Flatten the dimension map into sorted "key:value" strings.
	rv := make([]string, 0, len(d))
	for k, v := range d {
		rv = append(rv, fmt.Sprintf("%s:%s", k, v))
	}
	sort.Strings(rv)
	return rv
}
823
824// relpath returns the relative path to the given file from the config file.
825func (b *builder) relpath(f string) string {
826	target := filepath.Join(b.relpathTargetDir, f)
827	rv, err := filepath.Rel(b.relpathBaseDir, target)
828	if err != nil {
829		sklog.Fatal(err)
830	}
831	return rv
832}
833
834// bundleRecipes generates the task to bundle and isolate the recipes.
835func (b *builder) bundleRecipes() string {
836	pkgs := append([]*specs.CipdPackage{}, CIPD_PKGS_GIT...)
837	pkgs = append(pkgs, CIPD_PKGS_PYTHON...)
838	b.MustAddTask(BUNDLE_RECIPES_NAME, &specs.TaskSpec{
839		CipdPackages: pkgs,
840		Command: []string{
841			"/bin/bash", "skia/infra/bots/bundle_recipes.sh", specs.PLACEHOLDER_ISOLATED_OUTDIR,
842		},
843		Dimensions: b.linuxGceDimensions(MACHINE_TYPE_SMALL),
844		EnvPrefixes: map[string][]string{
845			"PATH": []string{"cipd_bin_packages", "cipd_bin_packages/bin"},
846		},
847		Idempotent: true,
848		Isolate:    b.relpath("recipes.isolate"),
849	})
850	return BUNDLE_RECIPES_NAME
851}
852
853// buildTaskDrivers generates the task to compile the task driver code to run on
854// all platforms.
855func (b *builder) buildTaskDrivers() string {
856	b.MustAddTask(BUILD_TASK_DRIVERS_NAME, &specs.TaskSpec{
857		Caches:       CACHES_GO,
858		CipdPackages: append(CIPD_PKGS_GIT, b.MustGetCipdPackageFromAsset("go")),
859		Command: []string{
860			"/bin/bash", "skia/infra/bots/build_task_drivers.sh", specs.PLACEHOLDER_ISOLATED_OUTDIR,
861		},
862		Dimensions: b.linuxGceDimensions(MACHINE_TYPE_SMALL),
863		EnvPrefixes: map[string][]string{
864			"PATH": {"cipd_bin_packages", "cipd_bin_packages/bin", "go/go/bin"},
865		},
866		Idempotent: true,
867		Isolate:    "task_drivers.isolate",
868	})
869	return BUILD_TASK_DRIVERS_NAME
870}
871
872// updateGoDeps generates the task to update Go dependencies.
873func (b *builder) updateGoDeps(name string) string {
874	cipd := append([]*specs.CipdPackage{}, CIPD_PKGS_GIT...)
875	cipd = append(cipd, b.MustGetCipdPackageFromAsset("go"))
876	cipd = append(cipd, b.MustGetCipdPackageFromAsset("protoc"))
877
878	machineType := MACHINE_TYPE_MEDIUM
879	t := &specs.TaskSpec{
880		Caches:       CACHES_GO,
881		CipdPackages: cipd,
882		Command: []string{
883			"./update_go_deps",
884			"--project_id", "skia-swarming-bots",
885			"--task_id", specs.PLACEHOLDER_TASK_ID,
886			"--task_name", name,
887			"--workdir", ".",
888			"--gerrit_project", "skia",
889			"--gerrit_url", "https://skia-review.googlesource.com",
890			"--repo", specs.PLACEHOLDER_REPO,
891			"--reviewers", "borenet@google.com",
892			"--revision", specs.PLACEHOLDER_REVISION,
893			"--patch_issue", specs.PLACEHOLDER_ISSUE,
894			"--patch_set", specs.PLACEHOLDER_PATCHSET,
895			"--patch_server", specs.PLACEHOLDER_CODEREVIEW_SERVER,
896			"--alsologtostderr",
897		},
898		Dependencies: []string{BUILD_TASK_DRIVERS_NAME},
899		Dimensions:   b.linuxGceDimensions(machineType),
900		EnvPrefixes: map[string][]string{
901			"PATH": {"cipd_bin_packages", "cipd_bin_packages/bin", "go/go/bin"},
902		},
903		Isolate:        "empty.isolate",
904		ServiceAccount: b.cfg.ServiceAccountRecreateSKPs,
905	}
906	b.MustAddTask(name, t)
907	return name
908}
909
// isolateAssetCfg represents a task which copies a CIPD package into
// isolate.
type isolateAssetCfg struct {
	cipdPkg string // name of the CIPD asset to isolate
	path    string // path within the task dir which is copied to the isolated output
}
916
917// isolateCIPDAsset generates a task to isolate the given CIPD asset.
918func (b *builder) isolateCIPDAsset(name string) string {
919	asset := ISOLATE_ASSET_MAPPING[name]
920	b.MustAddTask(name, &specs.TaskSpec{
921		CipdPackages: []*specs.CipdPackage{
922			b.MustGetCipdPackageFromAsset(asset.cipdPkg),
923		},
924		Command:    []string{"/bin/cp", "-rL", asset.path, "${ISOLATED_OUTDIR}"},
925		Dimensions: b.linuxGceDimensions(MACHINE_TYPE_SMALL),
926		Idempotent: true,
927		Isolate:    b.relpath("empty.isolate"),
928	})
929	return name
930}
931
932// getIsolatedCIPDDeps returns the slice of Isolate_* tasks a given task needs.
933// This allows us to  save time on I/O bound bots, like the RPIs.
934func getIsolatedCIPDDeps(parts map[string]string) []string {
935	deps := []string{}
936	// Only do this on the RPIs for now. Other, faster machines shouldn't see much
937	// benefit and we don't need the extra complexity, for now
938	rpiOS := []string{"Android", "ChromeOS", "iOS"}
939
940	if o := parts["os"]; strings.Contains(o, "Chromecast") {
941		// Chromecasts don't have enough disk space to fit all of the content,
942		// so we do a subset of the skps.
943		deps = append(deps, ISOLATE_SKP_NAME)
944	} else if e := parts["extra_config"]; strings.Contains(e, "Skpbench") {
945		// Skpbench only needs skps
946		deps = append(deps, ISOLATE_SKP_NAME)
947		deps = append(deps, ISOLATE_MSKP_NAME)
948	} else if util.In(o, rpiOS) {
949		deps = append(deps, ISOLATE_SKP_NAME)
950		deps = append(deps, ISOLATE_SVG_NAME)
951		deps = append(deps, ISOLATE_SKIMAGE_NAME)
952	}
953
954	return deps
955}
956
957// usesGit adds attributes to tasks which use git.
958func (b *builder) usesGit(t *specs.TaskSpec, name string) {
959	t.Caches = append(t.Caches, CACHES_GIT...)
960	if !strings.Contains(name, "NoDEPS") {
961		t.Caches = append(t.Caches, CACHES_WORKDIR...)
962	}
963	t.CipdPackages = append(t.CipdPackages, CIPD_PKGS_GIT...)
964}
965
// usesGo adds attributes to tasks which use go. Recipes should use
// "with api.context(env=api.infra.go_env)".
// Windows tasks get the "go_win" asset instead, re-rooted at "go" so the
// install path matches the Linux/Mac layout.
func (b *builder) usesGo(t *specs.TaskSpec, name string) {
	t.Caches = append(t.Caches, CACHES_GO...)
	pkg := b.MustGetCipdPackageFromAsset("go")
	if strings.Contains(name, "Win") {
		pkg = b.MustGetCipdPackageFromAsset("go_win")
		// NOTE(review): this mutates the package returned by
		// MustGetCipdPackageFromAsset in place; assumes that either a fresh
		// value is returned per call or the "go" path is intended for all
		// subsequent users — confirm against MustGetCipdPackageFromAsset.
		pkg.Path = "go"
	}
	t.CipdPackages = append(t.CipdPackages, pkg)
}
977
978// usesDocker adds attributes to tasks which use docker.
979func usesDocker(t *specs.TaskSpec, name string) {
980	if strings.Contains(name, "EMCC") || strings.Contains(name, "SKQP") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "CMake") {
981		t.Caches = append(t.Caches, CACHES_DOCKER...)
982	}
983}
984
985// timeout sets the timeout(s) for this task.
986func timeout(task *specs.TaskSpec, timeout time.Duration) {
987	task.ExecutionTimeout = timeout
988	task.IoTimeout = timeout // With kitchen, step logs don't count toward IoTimeout.
989}
990
// attempts returns the desired MaxAttempts for this task.
func attempts(name string) int {
	// Framework bots can be long running; don't retry them.
	if strings.Contains(name, "Android_Framework") || strings.Contains(name, "G3_Framework") {
		return 1
	}
	isBuildOrUpload := strings.HasPrefix(name, "Build-") || strings.HasPrefix(name, "Upload-")
	if !isBuildOrUpload {
		// Sanitizers often find non-deterministic issues that retries would hide.
		for _, sanitizer := range []string{"ASAN", "MSAN", "TSAN", "UBSAN", "Valgrind"} {
			if strings.Contains(name, sanitizer) {
				return 1
			}
		}
	}
	// Retry by default to hide random bot/hardware failures.
	return 2
}
1008
// compile generates a compile task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
// Depending on the job name, extra CIPD packages (toolchains, SDKs) and
// dependencies are attached, and for certain Android builds an extra upload
// task is chained after the compile.
func (b *builder) compile(name string, parts map[string]string) string {
	recipe := "compile"
	isolate := "compile.isolate"
	var props map[string]string
	needSync := false
	// These builds can't use the pre-bundled sources and must sync the repo
	// (and DEPS) themselves via a different recipe.
	if strings.Contains(name, "NoDEPS") ||
		strings.Contains(name, "CMake") ||
		strings.Contains(name, "CommandBuffer") ||
		strings.Contains(name, "Flutter") ||
		strings.Contains(name, "ParentRevision") ||
		strings.Contains(name, "SKQP") {
		recipe = "sync_and_compile"
		isolate = "swarm_recipe.isolate"
		props = EXTRA_PROPS
		needSync = true
	}
	task := b.kitchenTask(name, recipe, isolate, b.cfg.ServiceAccountCompile, b.swarmDimensions(parts), props, OUTPUT_BUILD)
	if needSync {
		b.usesGit(task, name)
	} else {
		// A non-syncing compile is a pure function of the revision, so
		// identical runs can be deduplicated.
		task.Idempotent = true
	}
	usesDocker(task, name)

	// Android bots require a toolchain.
	if strings.Contains(name, "Android") {
		if strings.Contains(name, "Mac") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("android_ndk_darwin"))
		} else if strings.Contains(name, "Win") {
			pkg := b.MustGetCipdPackageFromAsset("android_ndk_windows")
			// Short path to dodge Windows path-length limits.
			pkg.Path = "n"
			task.CipdPackages = append(task.CipdPackages, pkg)
		} else if !strings.Contains(name, "SKQP") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("android_ndk_linux"))
		}
	} else if strings.Contains(name, "Chromecast") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("cast_toolchain"))
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
	} else if strings.Contains(name, "Chromebook") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_linux"))
		if parts["target_arch"] == "x86_64" {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_x86_64_gles"))
		} else if parts["target_arch"] == "arm" {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("armhf_sysroot"))
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
		}
	} else if strings.Contains(name, "Debian") {
		if strings.Contains(name, "Clang") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if parts["target_arch"] == "mips64el" || parts["target_arch"] == "loongson3a" {
			// The only toolchain we have for these architectures is GCC.
			if parts["compiler"] != "GCC" {
				glog.Fatalf("mips64el toolchain is GCC, but compiler is %q in %q", parts["compiler"], name)
			}
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("mips64el_toolchain_linux"))
		}
		if strings.Contains(name, "SwiftShader") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("cmake_linux"))
		}
		if strings.Contains(name, "OpenCL") {
			task.CipdPackages = append(task.CipdPackages,
				b.MustGetCipdPackageFromAsset("opencl_headers"),
				b.MustGetCipdPackageFromAsset("opencl_ocl_icd_linux"),
			)
		}
	} else if strings.Contains(name, "Win") {
		task.Dependencies = append(task.Dependencies, b.isolateCIPDAsset(ISOLATE_WIN_TOOLCHAIN_NAME))
		if strings.Contains(name, "Clang") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_win"))
		}
		if strings.Contains(name, "OpenCL") {
			task.CipdPackages = append(task.CipdPackages,
				b.MustGetCipdPackageFromAsset("opencl_headers"),
			)
		}
	} else if strings.Contains(name, "Mac") {
		task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_XCODE...)
		task.Caches = append(task.Caches, &specs.Cache{
			Name: "xcode",
			Path: "cache/Xcode.app",
		})
		if strings.Contains(name, "CommandBuffer") {
			timeout(task, 2*time.Hour)
		}
		if strings.Contains(name, "MoltenVK") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("moltenvk"))
		}
	}

	// Add the task.
	b.MustAddTask(name, task)

	// All compile tasks are runnable as their own Job. Assert that the Job
	// is listed in jobs.
	if !util.In(name, b.jobs) {
		glog.Fatalf("Job %q is missing from the jobs list!", name)
	}

	// Upload the skiaserve binary only for Linux Android compile bots.
	// See skbug.com/7399 for context.
	if parts["configuration"] == "Release" &&
		parts["extra_config"] == "Android" &&
		!strings.Contains(parts["os"], "Win") &&
		!strings.Contains(parts["os"], "Mac") {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, name)
		task := b.kitchenTask(uploadName, "upload_skiaserve", "swarm_recipe.isolate", b.cfg.ServiceAccountUploadBinary, b.linuxGceDimensions(MACHINE_TYPE_SMALL), EXTRA_PROPS, OUTPUT_NONE)
		task.Dependencies = append(task.Dependencies, name)
		b.MustAddTask(uploadName, task)
		// The upload task becomes the leaf of the chain.
		return uploadName
	}

	return name
}
1124
1125// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
1126// task in the generated chain of tasks, which the Job should add as a
1127// dependency.
1128func (b *builder) recreateSKPs(name string) string {
1129	dims := []string{
1130		"pool:SkiaCT",
1131		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
1132	}
1133	task := b.kitchenTask(name, "recreate_skps", "swarm_recipe.isolate", b.cfg.ServiceAccountRecreateSKPs, dims, EXTRA_PROPS, OUTPUT_NONE)
1134	task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GIT...)
1135	b.usesGo(task, name)
1136	timeout(task, 4*time.Hour)
1137	b.MustAddTask(name, task)
1138	return name
1139}
1140
1141// checkGeneratedFiles verifies that no generated SKSL files have been edited
1142// by hand.
1143func (b *builder) checkGeneratedFiles(name string) string {
1144	task := b.kitchenTask(name, "check_generated_files", "swarm_recipe.isolate", b.cfg.ServiceAccountCompile, b.linuxGceDimensions(MACHINE_TYPE_LARGE), EXTRA_PROPS, OUTPUT_NONE)
1145	task.Caches = append(task.Caches, CACHES_WORKDIR...)
1146	b.usesGo(task, name)
1147	b.MustAddTask(name, task)
1148	return name
1149}
1150
1151// housekeeper generates a Housekeeper task. Returns the name of the last task
1152// in the generated chain of tasks, which the Job should add as a dependency.
1153func (b *builder) housekeeper(name string) string {
1154	task := b.kitchenTask(name, "housekeeper", "swarm_recipe.isolate", b.cfg.ServiceAccountHousekeeper, b.linuxGceDimensions(MACHINE_TYPE_SMALL), EXTRA_PROPS, OUTPUT_NONE)
1155	b.usesGit(task, name)
1156	b.MustAddTask(name, task)
1157	return name
1158}
1159
1160// androidFrameworkCompile generates an Android Framework Compile task. Returns
1161// the name of the last task in the generated chain of tasks, which the Job
1162// should add as a dependency.
1163func (b *builder) androidFrameworkCompile(name string) string {
1164	task := b.kitchenTask(name, "android_compile", "compile_android_framework.isolate", b.cfg.ServiceAccountCompile, b.linuxGceDimensions(MACHINE_TYPE_SMALL), EXTRA_PROPS, OUTPUT_NONE)
1165	timeout(task, 2*time.Hour)
1166	b.MustAddTask(name, task)
1167	return name
1168}
1169
1170// g3FrameworkCompile generates a G3 Framework Compile task. Returns
1171// the name of the last task in the generated chain of tasks, which the Job
1172// should add as a dependency.
1173func (b *builder) g3FrameworkCompile(name string) string {
1174	task := b.kitchenTask(name, "g3_compile", "compile_g3_framework.isolate", b.cfg.ServiceAccountCompile, b.linuxGceDimensions(MACHINE_TYPE_SMALL), EXTRA_PROPS, OUTPUT_NONE)
1175	timeout(task, 3*time.Hour)
1176	b.MustAddTask(name, task)
1177	return name
1178}
1179
// infra generates an infra_tests task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func (b *builder) infra(name string) string {
	dims := b.linuxGceDimensions(MACHINE_TYPE_SMALL)
	if strings.Contains(name, "Win") {
		dims = []string{
			// Specify CPU to avoid running builds on bots with a more unique CPU.
			"cpu:x86-64-Haswell_GCE",
			"gpu:none",
			fmt.Sprintf("machine_type:%s", MACHINE_TYPE_MEDIUM), // We don't have any small Windows instances.
			fmt.Sprintf("os:%s", DEFAULT_OS_WIN),
			fmt.Sprintf("pool:%s", b.cfg.Pool),
		}
	}
	// The recipe needs to know which repo it is testing.
	extraProps := map[string]string{
		"repository": specs.PLACEHOLDER_REPO,
	}
	task := b.kitchenTask(name, "infra", "infra_tests.isolate", b.cfg.ServiceAccountCompile, dims, extraProps, OUTPUT_NONE)
	task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GSUTIL...)
	// Infra tests are a pure function of the revision; dedupe identical runs.
	task.Idempotent = true
	// Repos which call into Skia's gen_tasks.go should define their own
	// infra_tests.isolate and therefore should not use relpath().
	// (This overrides the isolate path set by kitchenTask above.)
	task.Isolate = "infra_tests.isolate"
	b.usesGit(task, name) // We don't run bot_update, but Go needs a git repo.
	b.usesGo(task, name)
	b.MustAddTask(name, task)
	return name
}
1208
1209// buildstats generates a builtstats task, which compiles code and generates
1210// statistics about the build.
1211func (b *builder) buildstats(name string, parts map[string]string, compileTaskName string) string {
1212	task := b.kitchenTask(name, "compute_buildstats", "swarm_recipe.isolate", "", b.swarmDimensions(parts), EXTRA_PROPS, OUTPUT_PERF)
1213	task.Dependencies = append(task.Dependencies, compileTaskName)
1214	task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("bloaty"))
1215	b.MustAddTask(name, task)
1216
1217	// Upload release results (for tracking in perf)
1218	// We have some jobs that are FYI (e.g. Debug-CanvasKit, tree-map generator)
1219	if strings.Contains(name, "Release") && !util.In(name, BUILD_STATS_NO_UPLOAD) {
1220		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, name)
1221		extraProps := map[string]string{
1222			"gs_bucket": b.cfg.GsBucketNano,
1223		}
1224		for k, v := range EXTRA_PROPS {
1225			extraProps[k] = v
1226		}
1227		uploadTask := b.kitchenTask(name, "upload_buildstats_results", "swarm_recipe.isolate", b.cfg.ServiceAccountUploadNano, b.linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
1228		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
1229		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
1230		b.MustAddTask(uploadName, uploadTask)
1231		return uploadName
1232	}
1233
1234	return name
1235}
1236
// getParentRevisionName returns the name of a compile task which builds
// against a "parent" revision. If the task has no extra_config, the suffix
// becomes a new extra_config ("-ParentRevision"); otherwise it is appended to
// the existing extra_config ("_ParentRevision").
func getParentRevisionName(compileTaskName string, parts map[string]string) string {
	// Early return instead of else-after-return, per Go style.
	if parts["extra_config"] == "" {
		return compileTaskName + "-ParentRevision"
	}
	return compileTaskName + "_ParentRevision"
}
1246
1247// calmbench generates a calmbench task. Returns the name of the last task in the
1248// generated chain of tasks, which the Job should add as a dependency.
1249func (b *builder) calmbench(name string, parts map[string]string, compileTaskName, compileParentName string) string {
1250	task := b.kitchenTask(name, "calmbench", "calmbench.isolate", "", b.swarmDimensions(parts), EXTRA_PROPS, OUTPUT_PERF)
1251	b.usesGit(task, name)
1252	task.Dependencies = append(task.Dependencies, compileTaskName, compileParentName, ISOLATE_SKP_NAME, ISOLATE_SVG_NAME)
1253	b.MustAddTask(name, task)
1254
1255	// Upload results if necessary.
1256	if strings.Contains(name, "Release") && b.doUpload(name) {
1257		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, name)
1258		extraProps := map[string]string{
1259			"gs_bucket": b.cfg.GsBucketCalm,
1260		}
1261		for k, v := range EXTRA_PROPS {
1262			extraProps[k] = v
1263		}
1264		uploadTask := b.kitchenTask(name, "upload_calmbench_results", "swarm_recipe.isolate", b.cfg.ServiceAccountUploadCalmbench, b.linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
1265		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
1266		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
1267		b.MustAddTask(uploadName, uploadTask)
1268		return uploadName
1269	}
1270
1271	return name
1272}
1273
1274// doUpload indicates whether the given Job should upload its results.
1275func (b *builder) doUpload(name string) bool {
1276	for _, s := range b.cfg.NoUpload {
1277		m, err := regexp.MatchString(s, name)
1278		if err != nil {
1279			glog.Fatal(err)
1280		}
1281		if m {
1282			return false
1283		}
1284	}
1285	return true
1286}
1287
// test generates a Test task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
// The recipe, isolate, dependencies, and timeouts vary by job name; jobs
// which pass doUpload get an extra upload task chained after the test.
func (b *builder) test(name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Choose the recipe based on the kind of test.
	recipe := "test"
	if strings.Contains(name, "SKQP") {
		recipe = "skqp_test"
		if strings.Contains(name, "Emulator") {
			recipe = "test_skqp_emulator"
		}
	} else if strings.Contains(name, "OpenCL") {
		// TODO(dogben): Longer term we may not want this to be called a "Test" task, but until we start
		// running hs_bench or kx, it will be easier to fit into the current job name schema.
		recipe = "compute_test"
	} else if strings.Contains(name, "PathKit") {
		recipe = "test_pathkit"
	} else if strings.Contains(name, "CanvasKit") {
		recipe = "test_canvaskit"
	} else if strings.Contains(name, "LottieWeb") {
		recipe = "test_lottie_web"
	}
	extraProps := map[string]string{
		"gold_hashes_url": b.cfg.GoldHashesURL,
	}
	for k, v := range EXTRA_PROPS {
		extraProps[k] = v
	}
	// Some devices are tagged with an internal hardware label; pass it along.
	iid := b.internalHardwareLabel(parts)
	if iid != nil {
		extraProps["internal_hardware_label"] = strconv.Itoa(*iid)
	}
	// Docker/web tests don't need the bundled Skia sources.
	isolate := "test_skia_bundled.isolate"
	if strings.Contains(name, "CanvasKit") || strings.Contains(name, "Emulator") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "PathKit") {
		isolate = "swarm_recipe.isolate"
	}
	task := b.kitchenTask(name, recipe, isolate, "", b.swarmDimensions(parts), extraProps, OUTPUT_TEST)
	task.CipdPackages = append(task.CipdPackages, pkgs...)
	if strings.Contains(name, "Lottie") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("lottie-samples"))
	}
	if !strings.Contains(name, "LottieWeb") {
		// Test.+LottieWeb doesn't require anything in Skia to be compiled.
		task.Dependencies = append(task.Dependencies, compileTaskName)
	}

	if strings.Contains(name, "Android_ASAN") {
		task.Dependencies = append(task.Dependencies, b.isolateCIPDAsset(ISOLATE_NDK_LINUX_NAME))
	}
	if strings.Contains(name, "SKQP") {
		if !strings.Contains(name, "Emulator") {
			task.Dependencies = append(task.Dependencies, b.isolateCIPDAsset(ISOLATE_GCLOUD_LINUX_NAME))
		}
	}
	// I/O-bound bots (e.g. RPIs) get their assets via isolate tasks instead.
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		task.Dependencies = append(task.Dependencies, deps...)
	}
	task.Expiration = 20 * time.Hour

	// Default timeout; slower configurations below override it.
	timeout(task, 4*time.Hour)
	if strings.Contains(parts["extra_config"], "Valgrind") {
		timeout(task, 9*time.Hour)
		task.Expiration = 48 * time.Hour
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		// Since Valgrind runs on the same bots as the CQ, we restrict Valgrind to a subset of the bots
		// to ensure there are always bots free for CQ tasks.
		task.Dimensions = append(task.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		timeout(task, 9*time.Hour)
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		timeout(task, 6*time.Hour)
	}
	b.MustAddTask(name, task)

	// Upload results if necessary. TODO(kjlubick): If we do coverage analysis at the same
	// time as normal tests (which would be nice), cfg.json needs to have Coverage removed.
	if b.doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": b.cfg.GsBucketGm,
		}
		for k, v := range EXTRA_PROPS {
			extraProps[k] = v
		}
		uploadTask := b.kitchenTask(name, "upload_dm_results", "swarm_recipe.isolate", b.cfg.ServiceAccountUploadGM, b.linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		// The upload task becomes the leaf of the chain.
		return uploadName
	}

	return name
}
1380
// perf generates a Perf task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
// Recipe, timeouts, and extra packages vary by job name; Release jobs which
// pass doUpload get an extra upload task chained after the perf run.
func (b *builder) perf(name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Choose the recipe and isolate based on the kind of perf run.
	recipe := "perf"
	isolate := b.relpath("perf_skia_bundled.isolate")
	if strings.Contains(parts["extra_config"], "Skpbench") {
		recipe = "skpbench"
		isolate = b.relpath("skpbench_skia_bundled.isolate")
	} else if strings.Contains(name, "PathKit") {
		recipe = "perf_pathkit"
	} else if strings.Contains(name, "CanvasKit") {
		recipe = "perf_canvaskit"
	} else if strings.Contains(name, "SkottieTracing") {
		recipe = "perf_skottietrace"
	} else if strings.Contains(name, "SkottieWASM") || strings.Contains(name, "LottieWeb") {
		recipe = "perf_skottiewasm_lottieweb"
	}
	task := b.kitchenTask(name, recipe, isolate, "", b.swarmDimensions(parts), EXTRA_PROPS, OUTPUT_PERF)
	task.CipdPackages = append(task.CipdPackages, pkgs...)
	if !strings.Contains(name, "LottieWeb") {
		// Perf.+LottieWeb doesn't require anything in Skia to be compiled.
		task.Dependencies = append(task.Dependencies, compileTaskName)
	}
	task.Expiration = 20 * time.Hour
	// Default timeout; slower configurations below override it.
	timeout(task, 4*time.Hour)
	// I/O-bound bots (e.g. RPIs) get their assets via isolate tasks instead.
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		task.Dependencies = append(task.Dependencies, deps...)
	}

	if strings.Contains(parts["extra_config"], "Valgrind") {
		timeout(task, 9*time.Hour)
		task.Expiration = 48 * time.Hour
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		// Since Valgrind runs on the same bots as the CQ, we restrict Valgrind to a subset of the bots
		// to ensure there are always bots free for CQ tasks.
		task.Dimensions = append(task.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		timeout(task, 9*time.Hour)
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		timeout(task, 6*time.Hour)
	} else if strings.Contains(parts["extra_config"], "SkottieWASM") || strings.Contains(parts["extra_config"], "LottieWeb") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("node"))
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("lottie-samples"))
		task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GIT...)
	} else if strings.Contains(parts["extra_config"], "Skottie") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("lottie-samples"))
	}
	// Some devices are tagged with an internal hardware label; pass it along.
	iid := b.internalHardwareLabel(parts)
	if iid != nil {
		task.Command = append(task.Command, fmt.Sprintf("internal_hardware_label=%d", *iid))
	}
	b.MustAddTask(name, task)

	// Upload results if necessary.
	if strings.Contains(name, "Release") && b.doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, b.jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": b.cfg.GsBucketNano,
		}
		for k, v := range EXTRA_PROPS {
			extraProps[k] = v
		}
		uploadTask := b.kitchenTask(name, "upload_nano_results", "swarm_recipe.isolate", b.cfg.ServiceAccountUploadNano, b.linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		// The upload task becomes the leaf of the chain.
		return uploadName
	}
	return name
}
1452
1453// presubmit generates a task which runs the presubmit for this repo.
1454func (b *builder) presubmit(name string) string {
1455	extraProps := map[string]string{
1456		"category":         "cq",
1457		"patch_gerrit_url": "https://skia-review.googlesource.com",
1458		"patch_project":    "skia",
1459		"patch_ref":        specs.PLACEHOLDER_PATCH_REF,
1460		"reason":           "CQ",
1461		"repo_name":        "skia",
1462	}
1463	for k, v := range EXTRA_PROPS {
1464		extraProps[k] = v
1465	}
1466	// Use MACHINE_TYPE_LARGE because it seems to save time versus MEDIUM and we want presubmit to be
1467	// fast.
1468	task := b.kitchenTask(name, "run_presubmit", "run_recipe.isolate", b.cfg.ServiceAccountCompile, b.linuxGceDimensions(MACHINE_TYPE_LARGE), extraProps, OUTPUT_NONE)
1469	b.usesGit(task, name)
1470	task.CipdPackages = append(task.CipdPackages, &specs.CipdPackage{
1471		Name:    "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
1472		Path:    "recipe_bundle",
1473		Version: "refs/heads/master",
1474	})
1475	task.Dependencies = []string{} // No bundled recipes for this one.
1476	b.MustAddTask(name, task)
1477	return name
1478}
1479
// process generates tasks and jobs for the given job name. Every job name in
// the config passes through here exactly once; tasks are registered via
// MustAddTask inside the helpers, and the Job referencing them is registered
// at the end.
func (b *builder) process(name string) {
	var priority float64 // Leave as default for most jobs.
	// Names of the tasks which the Job depends on directly.
	deps := []string{}

	// Bundle Recipes.
	if name == BUNDLE_RECIPES_NAME {
		deps = append(deps, b.bundleRecipes())
	}
	if name == BUILD_TASK_DRIVERS_NAME {
		deps = append(deps, b.buildTaskDrivers())
	}

	// Isolate CIPD assets.
	if _, ok := ISOLATE_ASSET_MAPPING[name]; ok {
		deps = append(deps, b.isolateCIPDAsset(name))
	}

	parts, err := b.jobNameSchema.ParseJobName(name)
	if err != nil {
		glog.Fatal(err)
	}

	// RecreateSKPs.
	if strings.Contains(name, "RecreateSKPs") {
		deps = append(deps, b.recreateSKPs(name))
	}

	// Update Go Dependencies.
	if strings.Contains(name, "UpdateGoDeps") {
		// Update Go deps bot.
		deps = append(deps, b.updateGoDeps(name))
	}

	// Infra tests.
	if strings.Contains(name, "Housekeeper-PerCommit-InfraTests") {
		deps = append(deps, b.infra(name))
	}

	// Compile bots.
	if parts["role"] == "Build" {
		if parts["extra_config"] == "Android_Framework" {
			// Android Framework compile tasks use a different recipe.
			deps = append(deps, b.androidFrameworkCompile(name))
		} else if parts["extra_config"] == "G3_Framework" {
			// G3 compile tasks use a different recipe.
			deps = append(deps, b.g3FrameworkCompile(name))
		} else {
			deps = append(deps, b.compile(name, parts))
		}
	}

	// Most remaining bots need a compile task.
	compileTaskName := b.deriveCompileTaskName(name, parts)
	compileTaskParts, err := b.jobNameSchema.ParseJobName(compileTaskName)
	if err != nil {
		glog.Fatal(err)
	}
	compileParentName := getParentRevisionName(compileTaskName, compileTaskParts)
	compileParentParts, err := b.jobNameSchema.ParseJobName(compileParentName)
	if err != nil {
		glog.Fatal(err)
	}

	// These bots do not need a compile task.
	if parts["role"] != "Build" &&
		name != "Housekeeper-PerCommit-BundleRecipes" &&
		!strings.Contains(name, "Housekeeper-PerCommit-InfraTests") &&
		name != "Housekeeper-PerCommit-CheckGeneratedFiles" &&
		name != "Housekeeper-Nightly-UpdateGoDeps" &&
		name != "Housekeeper-OnDemand-Presubmit" &&
		name != "Housekeeper-PerCommit" &&
		name != BUILD_TASK_DRIVERS_NAME &&
		!strings.Contains(name, "Android_Framework") &&
		!strings.Contains(name, "G3_Framework") &&
		!strings.Contains(name, "RecreateSKPs") &&
		!strings.Contains(name, "Housekeeper-PerCommit-Isolate") &&
		!strings.Contains(name, "SkottieWASM") &&
		!strings.Contains(name, "LottieWeb") {
		// Register the compile task(s) this job depends on; the dependency
		// itself is added by the role-specific helper below.
		b.compile(compileTaskName, compileTaskParts)
		if parts["role"] == "Calmbench" {
			// Calmbench bots also need the compile task for the parent revision.
			b.compile(compileParentName, compileParentParts)
		}
	}

	// Housekeepers.
	if name == "Housekeeper-PerCommit" {
		deps = append(deps, b.housekeeper(name))
	}
	if name == "Housekeeper-PerCommit-CheckGeneratedFiles" {
		deps = append(deps, b.checkGeneratedFiles(name))
	}
	if name == "Housekeeper-OnDemand-Presubmit" {
		// Presubmit is high priority so CQ results come back quickly.
		priority = 1
		deps = append(deps, b.presubmit(name))
	}

	// Common assets needed by the remaining bots.

	pkgs := []*specs.CipdPackage{}

	if deps := getIsolatedCIPDDeps(parts); len(deps) == 0 {
		// for desktop machines
		pkgs = []*specs.CipdPackage{
			b.MustGetCipdPackageFromAsset("skimage"),
			b.MustGetCipdPackageFromAsset("skp"),
			b.MustGetCipdPackageFromAsset("svg"),
		}
	}

	if strings.Contains(name, "Ubuntu") || strings.Contains(name, "Debian") {
		if strings.Contains(name, "SAN") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_sdk"))
		}
		if strings.Contains(name, "Intel") && strings.Contains(name, "GPU") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("mesa_intel_driver_linux"))
		}
		if strings.Contains(name, "OpenCL") {
			pkgs = append(pkgs,
				b.MustGetCipdPackageFromAsset("opencl_ocl_icd_linux"),
				b.MustGetCipdPackageFromAsset("opencl_intel_neo_linux"),
			)
		}
	}
	if strings.Contains(name, "ProcDump") {
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("procdump_win"))
	}
	if strings.Contains(name, "CanvasKit") || (parts["role"] == "Test" && strings.Contains(name, "LottieWeb")) || strings.Contains(name, "PathKit") {
		// Docker-based tests that don't need the standard CIPD assets
		pkgs = []*specs.CipdPackage{}
	}

	// Test bots.
	if parts["role"] == "Test" {
		deps = append(deps, b.test(name, parts, compileTaskName, pkgs))
	}

	// Perf bots.
	if parts["role"] == "Perf" {
		deps = append(deps, b.perf(name, parts, compileTaskName, pkgs))
	}

	// Calmbench bots.
	if parts["role"] == "Calmbench" {
		deps = append(deps, b.calmbench(name, parts, compileTaskName, compileParentName))
	}

	// Valgrind runs at a low priority so that it doesn't occupy all the bots.
	if strings.Contains(name, "Valgrind") {
		// Priority of 0.085 should result in Valgrind tasks with a blamelist of ~10 commits having the
		// same score as other tasks with a blamelist of 1 commit, when we have insufficient bot
		// capacity to run more frequently.
		priority = 0.085
	}

	// BuildStats bots. This computes things like binary size.
	if parts["role"] == "BuildStats" {
		deps = append(deps, b.buildstats(name, parts, compileTaskName))
	}

	// Add the Job spec.
	j := &specs.JobSpec{
		Priority:  priority,
		TaskSpecs: deps,
		Trigger:   specs.TRIGGER_ANY_BRANCH,
	}
	// Choose the trigger based on the job name; first match wins.
	if strings.Contains(name, "-Nightly-") {
		j.Trigger = specs.TRIGGER_NIGHTLY
	} else if strings.Contains(name, "-Weekly-") {
		j.Trigger = specs.TRIGGER_WEEKLY
	} else if strings.Contains(name, "Flutter") || strings.Contains(name, "CommandBuffer") {
		j.Trigger = specs.TRIGGER_MASTER_ONLY
	} else if strings.Contains(name, "-OnDemand-") || strings.Contains(name, "Android_Framework") || strings.Contains(name, "G3_Framework") {
		j.Trigger = specs.TRIGGER_ON_DEMAND
	}
	b.MustAddJob(name, j)
}
1660
1661// TODO(borenet): The below really belongs in its own file, probably next to the
1662// builder_name_schema.json file.
1663
// schema is a sub-struct of JobNameSchema. It describes the layout of a job
// name for a single role: which parts must appear, which may appear, and
// which sub-roles trigger recursive parsing.
type schema struct {
	// Keys are the name parts which must appear, in this order, after the role.
	Keys         []string `json:"keys"`
	// OptionalKeys are name parts which may appear, in this order, after the
	// required Keys (and after any recursive sub-role parts).
	OptionalKeys []string `json:"optional_keys"`
	// RecurseRoles lists sub-role names; when the next name part matches one
	// of these, that sub-role's own schema is applied recursively.
	RecurseRoles []string `json:"recurse_roles"`
}
1670
// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format. It is loaded from a JSON file (see NewJobNameSchema).
type JobNameSchema struct {
	// Schema maps a role name to the schema describing that role's name parts.
	Schema map[string]*schema `json:"builder_name_schema"`
	// Sep is the separator placed between name parts, e.g. "-".
	Sep    string             `json:"builder_name_sep"`
}
1677
1678// NewJobNameSchema returns a JobNameSchema instance based on the given JSON
1679// file.
1680func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
1681	var rv JobNameSchema
1682	f, err := os.Open(jsonFile)
1683	if err != nil {
1684		return nil, err
1685	}
1686	defer util.Close(f)
1687	if err := json.NewDecoder(f).Decode(&rv); err != nil {
1688		return nil, err
1689	}
1690	return &rv, nil
1691}
1692
1693// ParseJobName splits the given Job name into its component parts, according
1694// to the schema.
1695func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
1696	popFront := func(items []string) (string, []string, error) {
1697		if len(items) == 0 {
1698			return "", nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
1699		}
1700		return items[0], items[1:], nil
1701	}
1702
1703	result := map[string]string{}
1704
1705	var parse func(int, string, []string) ([]string, error)
1706	parse = func(depth int, role string, parts []string) ([]string, error) {
1707		s, ok := s.Schema[role]
1708		if !ok {
1709			return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
1710		}
1711		if depth == 0 {
1712			result["role"] = role
1713		} else {
1714			result[fmt.Sprintf("sub-role-%d", depth)] = role
1715		}
1716		var err error
1717		for _, key := range s.Keys {
1718			var value string
1719			value, parts, err = popFront(parts)
1720			if err != nil {
1721				return nil, err
1722			}
1723			result[key] = value
1724		}
1725		for _, subRole := range s.RecurseRoles {
1726			if len(parts) > 0 && parts[0] == subRole {
1727				parts, err = parse(depth+1, parts[0], parts[1:])
1728				if err != nil {
1729					return nil, err
1730				}
1731			}
1732		}
1733		for _, key := range s.OptionalKeys {
1734			if len(parts) > 0 {
1735				var value string
1736				value, parts, err = popFront(parts)
1737				if err != nil {
1738					return nil, err
1739				}
1740				result[key] = value
1741			}
1742		}
1743		if len(parts) > 0 {
1744			return nil, fmt.Errorf("Invalid job name: %s (too many parts)", n)
1745		}
1746		return parts, nil
1747	}
1748
1749	split := strings.Split(n, s.Sep)
1750	if len(split) < 2 {
1751		return nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
1752	}
1753	role := split[0]
1754	split = split[1:]
1755	_, err := parse(0, role, split)
1756	return result, err
1757}
1758
// MakeJobName assembles the given parts of a Job name, according to the schema.
// This is the inverse of ParseJobName: the "role" (and "sub-role-N") entries
// plus the schema's Keys and OptionalKeys are joined with s.Sep in schema
// order. An error is returned if a required part is missing or if unknown
// parts are left over.
func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
	// Name components accumulate here in order; joined at the end.
	rvParts := make([]string, 0, len(parts))

	// process consumes the parts belonging to the role at the given recursion
	// depth, appending them to rvParts and deleting them from the map. It
	// returns the (mutated) map so the recursion can see what remains.
	var process func(int, map[string]string) (map[string]string, error)
	process = func(depth int, parts map[string]string) (map[string]string, error) {
		// The top-level role lives under "role"; nested roles under
		// "sub-role-<depth>".
		roleKey := "role"
		if depth != 0 {
			roleKey = fmt.Sprintf("sub-role-%d", depth)
		}
		role, ok := parts[roleKey]
		if !ok {
			return nil, fmt.Errorf("Invalid job parts; missing key %q", roleKey)
		}

		// NOTE: this deliberately shadows the receiver s with this role's
		// schema for the remainder of the closure.
		s, ok := s.Schema[role]
		if !ok {
			return nil, fmt.Errorf("Invalid job parts; unknown role %q", role)
		}
		rvParts = append(rvParts, role)
		delete(parts, roleKey)

		// Required keys, in schema order.
		for _, key := range s.Keys {
			value, ok := parts[key]
			if !ok {
				return nil, fmt.Errorf("Invalid job parts; missing %q", key)
			}
			rvParts = append(rvParts, value)
			delete(parts, key)
		}

		// If this role declares sub-roles, a sub-role part is required (not
		// optional) and must be one of the declared RecurseRoles.
		if len(s.RecurseRoles) > 0 {
			subRoleKey := fmt.Sprintf("sub-role-%d", depth+1)
			subRole, ok := parts[subRoleKey]
			if !ok {
				return nil, fmt.Errorf("Invalid job parts; missing %q", subRoleKey)
			}
			rvParts = append(rvParts, subRole)
			delete(parts, subRoleKey)
			found := false
			for _, recurseRole := range s.RecurseRoles {
				if recurseRole == subRole {
					found = true
					var err error
					// Recurse to consume the sub-role's own keys.
					parts, err = process(depth+1, parts)
					if err != nil {
						return nil, err
					}
					break
				}
			}
			if !found {
				return nil, fmt.Errorf("Invalid job parts; unknown sub-role %q", subRole)
			}
		}
		// Optional keys are appended only when present.
		for _, key := range s.OptionalKeys {
			if value, ok := parts[key]; ok {
				rvParts = append(rvParts, value)
				delete(parts, key)
			}
		}
		// Anything still in the map was not consumed by the schema.
		if len(parts) > 0 {
			return nil, fmt.Errorf("Invalid job parts: too many parts: %v", parts)
		}
		return parts, nil
	}

	// Copy the parts map, so that we can modify at will.
	partsCpy := make(map[string]string, len(parts))
	for k, v := range parts {
		partsCpy[k] = v
	}
	if _, err := process(0, partsCpy); err != nil {
		return "", err
	}
	return strings.Join(rvParts, s.Sep), nil
}
1836