• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1// Copyright 2016 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5package main
6
7/*
8	Generate the tasks.json file.
9*/
10
11import (
12	"encoding/json"
13	"flag"
14	"fmt"
15	"io/ioutil"
16	"os"
17	"path"
18	"path/filepath"
19	"regexp"
20	"runtime"
21	"sort"
22	"strconv"
23	"strings"
24	"time"
25
26	"github.com/golang/glog"
27	"go.skia.org/infra/go/sklog"
28	"go.skia.org/infra/go/util"
29	"go.skia.org/infra/task_scheduler/go/specs"
30)
31
const (
	// Names of per-commit housekeeping tasks which bundle recipes or isolate
	// CIPD assets so that other tasks can depend on them.
	BUNDLE_RECIPES_NAME        = "Housekeeper-PerCommit-BundleRecipes"
	ISOLATE_GCLOUD_LINUX_NAME  = "Housekeeper-PerCommit-IsolateGCloudLinux"
	ISOLATE_GO_DEPS_NAME       = "Housekeeper-PerCommit-IsolateGoDeps"
	ISOLATE_SKIMAGE_NAME       = "Housekeeper-PerCommit-IsolateSkImage"
	ISOLATE_SKP_NAME           = "Housekeeper-PerCommit-IsolateSKP"
	ISOLATE_SVG_NAME           = "Housekeeper-PerCommit-IsolateSVG"
	ISOLATE_NDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidNDKLinux"
	ISOLATE_SDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidSDKLinux"
	ISOLATE_WIN_TOOLCHAIN_NAME = "Housekeeper-PerCommit-IsolateWinToolchain"

	// Default values for the Swarming "os" dimension, per platform.
	DEFAULT_OS_DEBIAN    = "Debian"
	DEFAULT_OS_LINUX_GCE = "Debian"
	DEFAULT_OS_MAC       = "Mac"
	DEFAULT_OS_WIN       = "Windows-2016Server"

	// Default LogDog project ID; may be overridden via alternateProject.
	DEFAULT_PROJECT = "skia"

	// GCE machine types used for the "machine_type" dimension.
	// Small is a 2-core machine.
	// TODO(dogben): Would n1-standard-1 or n1-standard-2 be sufficient?
	MACHINE_TYPE_SMALL = "n1-highmem-2"
	// Medium is a 16-core machine.
	MACHINE_TYPE_MEDIUM = "n1-standard-16"
	// Large is a 64-core machine. (We use "highcpu" because we don't need more than 57GB memory for
	// any of our tasks.)
	MACHINE_TYPE_LARGE = "n1-highcpu-64"

	// Swarming output dirs.
	OUTPUT_NONE  = "output_ignored" // This will result in outputs not being isolated.
	OUTPUT_BUILD = "build"
	OUTPUT_TEST  = "test"
	OUTPUT_PERF  = "perf"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"

	// Service accounts used by the various task types.
	SERVICE_ACCOUNT_COMPILE       = "skia-external-compile-tasks@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_HOUSEKEEPER   = "skia-external-housekeeper@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_RECREATE_SKPS = "skia-recreate-skps@skia-swarming-bots.iam.gserviceaccount.com"
	// NOTE(review): this deliberately(?) reuses the recreate-skps account
	// rather than a dedicated one — confirm this is intentional.
	SERVICE_ACCOUNT_UPDATE_GO_DEPS     = "skia-recreate-skps@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPDATE_META_CONFIG = "skia-update-meta-config@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_BINARY      = "skia-external-binary-uploader@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_CALMBENCH   = "skia-external-calmbench-upload@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_GM          = "skia-external-gm-uploader@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_NANO        = "skia-external-nano-uploader@skia-swarming-bots.iam.gserviceaccount.com"
)
78
var (
	// "Constants"

	// Top-level list of all jobs to run at each commit; loaded from
	// jobs.json.
	JOBS []string

	// General configuration information; loaded from the JSON file named by
	// the --cfg_file flag.
	CONFIG struct {
		GsBucketGm    string   `json:"gs_bucket_gm"`
		GoldHashesURL string   `json:"gold_hashes_url"`
		GsBucketNano  string   `json:"gs_bucket_nano"`
		GsBucketCalm  string   `json:"gs_bucket_calm"`
		NoUpload      []string `json:"no_upload"`
		Pool          string   `json:"pool"`
	}

	// alternateProject can be set in an init function to override the default project ID.
	alternateProject string

	// alternateServiceAccount can be set in an init function to override the normal service accounts.
	// Takes one of SERVICE_ACCOUNT_* constants as an argument and returns the service account that
	// should be used, or uses sklog.Fatal to indicate a problem.
	alternateServiceAccount func(serviceAccountEnum string) string

	// alternateSwarmDimensions can be set in an init function to override the default swarming bot
	// dimensions for the given task.
	alternateSwarmDimensions func(parts map[string]string) []string

	// internalHardwareLabelFn can be set in an init function to provide an
	// internal_hardware_label variable to the recipe.
	internalHardwareLabelFn func(parts map[string]string) *int

	// Defines the structure of job names.
	jobNameSchema *JobNameSchema

	// Named caches used by tasks.

	// CACHES_GIT are the named caches used by tasks which use git.
	CACHES_GIT = []*specs.Cache{
		&specs.Cache{
			Name: "git",
			Path: "cache/git",
		},
		&specs.Cache{
			Name: "git_cache",
			Path: "cache/git_cache",
		},
	}
	// CACHES_GO is the named cache used by tasks which use Go.
	CACHES_GO = []*specs.Cache{
		&specs.Cache{
			Name: "go_cache",
			Path: "cache/go_cache",
		},
	}
	// CACHES_WORKDIR is the named cache holding the persistent checkout
	// working directory.
	CACHES_WORKDIR = []*specs.Cache{
		&specs.Cache{
			Name: "work",
			Path: "cache/work",
		},
	}
	// CACHES_DOCKER is the named cache used by tasks which run Docker.
	CACHES_DOCKER = []*specs.Cache{
		&specs.Cache{
			Name: "docker",
			Path: "cache/docker",
		},
	}
	// Versions of the following copied from
	// https://chrome-internal.googlesource.com/infradata/config/+/master/configs/cr-buildbucket/swarming_task_template_canary.json#42
	// to test the fix for chromium:836196.
	// (In the future we may want to use versions from
	// https://chrome-internal.googlesource.com/infradata/config/+/master/configs/cr-buildbucket/swarming_task_template.json#42)
	// TODO(borenet): Roll these versions automatically!

	// CIPD_PKGS_PYTHON provides the vpython binary.
	CIPD_PKGS_PYTHON = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/tools/luci/vpython/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:96f81e737868d43124b4661cf1c325296ca04944",
		},
	}

	// CIPD_PKGS_CPYTHON provides a full CPython interpreter.
	CIPD_PKGS_CPYTHON = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/python/cpython/${platform}",
			Path:    "cipd_bin_packages",
			Version: "version:2.7.14.chromium14",
		},
	}

	// CIPD_PKGS_KITCHEN provides kitchen, luci-auth, and (via
	// CIPD_PKGS_PYTHON) vpython, for tasks which run recipes.
	CIPD_PKGS_KITCHEN = append([]*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/tools/luci/kitchen/${platform}",
			Path:    ".",
			Version: "git_revision:d8f38ca9494b5af249942631f9cee45927f6b4bc",
		},
		&specs.CipdPackage{
			Name:    "infra/tools/luci-auth/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:2c805f1c716f6c5ad2126b27ec88b8585a09481e",
		},
	}, CIPD_PKGS_PYTHON...)

	// CIPD_PKGS_GIT provides git plus its LUCI wrapper and credential helper.
	CIPD_PKGS_GIT = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/git/${platform}",
			Path:    "cipd_bin_packages",
			Version: "version:2.17.1.chromium15",
		},
		&specs.CipdPackage{
			Name:    "infra/tools/git/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:c9c8a52bfeaf8bc00ece22fdfd447822c8fcad77",
		},
		&specs.CipdPackage{
			Name:    "infra/tools/luci/git-credential-luci/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:2c805f1c716f6c5ad2126b27ec88b8585a09481e",
		},
	}

	// CIPD_PKGS_GSUTIL provides gsutil for upload tasks.
	CIPD_PKGS_GSUTIL = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/gsutil",
			Path:    "cipd_bin_packages",
			Version: "version:4.28",
		},
	}

	// CIPD_PKGS_XCODE provides the Mac toolchain installer.
	CIPD_PKGS_XCODE = []*specs.CipdPackage{
		// https://chromium.googlesource.com/chromium/tools/build/+/e19b7d9390e2bb438b566515b141ed2b9ed2c7c2/scripts/slave/recipe_modules/ios/api.py#317
		// This package is really just an installer for XCode.
		&specs.CipdPackage{
			Name: "infra/tools/mac_toolchain/${platform}",
			Path: "mac_toolchain",
			// When this is updated, also update
			// https://skia.googlesource.com/skcms.git/+/f1e2b45d18facbae2dece3aca673fe1603077846/infra/bots/gen_tasks.go#56
			Version: "git_revision:796d2b92cff93fc2059623ce0a66284373ceea0a",
		},
	}

	// Flags.
	builderNameSchemaFile = flag.String("builder_name_schema", "", "Path to the builder_name_schema.json file. If not specified, uses infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json from this repo.")
	assetsDir             = flag.String("assets_dir", "", "Directory containing assets.")
	cfgFile               = flag.String("cfg_file", "", "JSON file containing general configuration information.")
	jobsFile              = flag.String("jobs", "", "JSON file containing jobs to run.")
)
223
224// Build the LogDog annotation URL.
225func logdogAnnotationUrl() string {
226	project := DEFAULT_PROJECT
227	if alternateProject != "" {
228		project = alternateProject
229	}
230	return fmt.Sprintf("logdog://logs.chromium.org/%s/%s/+/annotations", project, specs.PLACEHOLDER_TASK_ID)
231}
232
233// Create a properties JSON string.
234func props(p map[string]string) string {
235	d := make(map[string]interface{}, len(p)+1)
236	for k, v := range p {
237		d[k] = interface{}(v)
238	}
239	d["$kitchen"] = struct {
240		DevShell bool `json:"devshell"`
241		GitAuth  bool `json:"git_auth"`
242	}{
243		DevShell: true,
244		GitAuth:  true,
245	}
246
247	j, err := json.Marshal(d)
248	if err != nil {
249		sklog.Fatal(err)
250	}
251	return strings.Replace(string(j), "\\u003c", "<", -1)
252}
253
// kitchenTask returns a specs.TaskSpec instance which uses Kitchen to run a
// recipe.
//
// name is the task name, also passed to the recipe as "buildername".
// recipe is the recipe to run. isolate names the .isolate file (resolved via
// relpath). serviceAccount may be rewritten by alternateServiceAccount when
// one is registered. dimensions are the Swarming bot dimensions. extraProps
// are merged over the default recipe properties (overriding on collision).
// outputDir is the Swarming output dir, or OUTPUT_NONE to skip isolating
// outputs. The returned task depends on BUNDLE_RECIPES_NAME and has a
// one-hour execution/IO timeout.
func kitchenTask(name, recipe, isolate, serviceAccount string, dimensions []string, extraProps map[string]string, outputDir string) *specs.TaskSpec {
	if serviceAccount != "" && alternateServiceAccount != nil {
		serviceAccount = alternateServiceAccount(serviceAccount)
	}
	cipd := append([]*specs.CipdPackage{}, CIPD_PKGS_KITCHEN...)
	// Windows tasks additionally need a full CPython interpreter.
	if strings.Contains(name, "Win") {
		cipd = append(cipd, CIPD_PKGS_CPYTHON...)
	}
	// Default recipe properties; mostly Swarming placeholder values which the
	// scheduler substitutes at task-creation time.
	properties := map[string]string{
		"buildbucket_build_id": specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID,
		"buildername":          name,
		"patch_issue":          specs.PLACEHOLDER_ISSUE,
		"patch_ref":            specs.PLACEHOLDER_PATCH_REF,
		"patch_repo":           specs.PLACEHOLDER_PATCH_REPO,
		"patch_set":            specs.PLACEHOLDER_PATCHSET,
		"patch_storage":        specs.PLACEHOLDER_PATCH_STORAGE,
		"repository":           specs.PLACEHOLDER_REPO,
		"revision":             specs.PLACEHOLDER_REVISION,
		"swarm_out_dir":        outputDir,
		"task_id":              specs.PLACEHOLDER_TASK_ID,
	}
	for k, v := range extraProps {
		properties[k] = v
	}
	// OUTPUT_NONE means no outputs are isolated.
	var outputs []string = nil
	if outputDir != OUTPUT_NONE {
		outputs = []string{outputDir}
	}
	task := &specs.TaskSpec{
		Caches: []*specs.Cache{
			&specs.Cache{
				Name: "vpython",
				Path: "cache/vpython",
			},
		},
		CipdPackages: cipd,
		// Invoke kitchen directly; it checks out the bundled recipes and
		// runs the requested recipe with the properties built above.
		Command: []string{
			"./kitchen${EXECUTABLE_SUFFIX}", "cook",
			"-checkout-dir", "recipe_bundle",
			"-mode", "swarming",
			"-luci-system-account", "system",
			"-cache-dir", "cache",
			"-temp-dir", "tmp",
			"-known-gerrit-host", "android.googlesource.com",
			"-known-gerrit-host", "boringssl.googlesource.com",
			"-known-gerrit-host", "chromium.googlesource.com",
			"-known-gerrit-host", "dart.googlesource.com",
			"-known-gerrit-host", "fuchsia.googlesource.com",
			"-known-gerrit-host", "go.googlesource.com",
			"-known-gerrit-host", "llvm.googlesource.com",
			"-known-gerrit-host", "skia.googlesource.com",
			"-known-gerrit-host", "webrtc.googlesource.com",
			"-output-result-json", "${ISOLATED_OUTDIR}/build_result_filename",
			"-workdir", ".",
			"-recipe", recipe,
			"-properties", props(properties),
			"-logdog-annotation-url", logdogAnnotationUrl(),
		},
		Dependencies: []string{BUNDLE_RECIPES_NAME},
		Dimensions:   dimensions,
		EnvPrefixes: map[string][]string{
			"PATH":                    []string{"cipd_bin_packages", "cipd_bin_packages/bin"},
			"VPYTHON_VIRTUALENV_ROOT": []string{"cache/vpython"},
		},
		ExtraTags: map[string]string{
			"log_location": logdogAnnotationUrl(),
		},
		Isolate:        relpath(isolate),
		MaxAttempts:    attempts(name),
		Outputs:        outputs,
		ServiceAccount: serviceAccount,
	}
	timeout(task, time.Hour)
	return task
}
331
332// internalHardwareLabel returns the internal ID for the bot, if any.
333func internalHardwareLabel(parts map[string]string) *int {
334	if internalHardwareLabelFn != nil {
335		return internalHardwareLabelFn(parts)
336	}
337	return nil
338}
339
// linuxGceDimensions are the Swarming dimensions for Linux GCE instances of
// the given machine type (one of the MACHINE_TYPE_* constants).
func linuxGceDimensions(machineType string) []string {
	return []string{
		// Specify CPU to avoid running builds on bots with a more unique CPU.
		"cpu:x86-64-Haswell_GCE",
		"gpu:none",
		fmt.Sprintf("machine_type:%s", machineType),
		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
		fmt.Sprintf("pool:%s", CONFIG.Pool),
	}
}
352
353func dockerGceDimensions() []string {
354	// There's limited parallelism for WASM builds, so we can get away with the medium
355	// instance instead of the beefy large instance.
356	// Docker being installed is the most important part.
357	return append(linuxGceDimensions(MACHINE_TYPE_MEDIUM), "docker_installed:true")
358}
359
// deriveCompileTaskName returns the name of a compile task based on the given
// job name.
//
// For Test/Perf/Calmbench jobs it rewrites the job's OS and extra_config
// parts into the corresponding Build task name (e.g. devices map back to the
// Debian9/Mac/Win host which cross-compiles for them); BuildStats jobs map
// directly to Build jobs; anything else maps to itself.
func deriveCompileTaskName(jobName string, parts map[string]string) string {
	if strings.Contains(jobName, "Bookmaker") {
		return "Build-Debian9-GCC-x86_64-Release"
	} else if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
		task_os := parts["os"]
		ec := []string{}
		if val := parts["extra_config"]; val != "" {
			ec = strings.Split(val, "_")
			// These extra_config parts affect only how the test runs, not
			// what gets compiled, so strip them from the Build task name.
			ignore := []string{"Skpbench", "AbandonGpuContext", "PreAbandonGpuContext", "Valgrind", "ReleaseAndAbandonGpuContext", "CCPR", "FSAA", "FAAA", "FDAA", "NativeFonts", "GDI", "NoGPUThreads", "ProcDump", "DDL1", "DDL3", "T8888", "DDLTotal", "DDLRecord", "9x9", "BonusConfigs"}
			keep := make([]string, 0, len(ec))
			for _, part := range ec {
				if !util.In(part, ignore) {
					keep = append(keep, part)
				}
			}
			ec = keep
		}
		// Map the device OS back to the host OS which builds for it, adding
		// an extra_config part where the build is a cross-compile.
		if task_os == "Android" {
			if !util.In("Android", ec) {
				ec = append([]string{"Android"}, ec...)
			}
			task_os = "Debian9"
		} else if task_os == "Chromecast" {
			task_os = "Debian9"
			ec = append([]string{"Chromecast"}, ec...)
		} else if strings.Contains(task_os, "ChromeOS") {
			ec = append([]string{"Chromebook", "GLES"}, ec...)
			task_os = "Debian9"
		} else if task_os == "iOS" {
			ec = append([]string{task_os}, ec...)
			task_os = "Mac"
		} else if strings.Contains(task_os, "Win") {
			task_os = "Win"
		} else if strings.Contains(task_os, "Ubuntu") || strings.Contains(task_os, "Debian") {
			task_os = "Debian9"
		} else if strings.Contains(task_os, "Mac") {
			task_os = "Mac"
		}
		jobNameMap := map[string]string{
			"role":          "Build",
			"os":            task_os,
			"compiler":      parts["compiler"],
			"target_arch":   parts["arch"],
			"configuration": parts["configuration"],
		}
		// PathKit/CanvasKit builds have a single fixed extra_config
		// regardless of the test job's extra_config parts.
		if strings.Contains(jobName, "PathKit") {
			ec = []string{"PathKit"}
		}
		if strings.Contains(jobName, "CanvasKit") {
			if parts["cpu_or_gpu"] == "CPU" {
				ec = []string{"CanvasKit_CPU"}
			} else {
				ec = []string{"CanvasKit"}
			}

		}
		if len(ec) > 0 {
			jobNameMap["extra_config"] = strings.Join(ec, "_")
		}
		name, err := jobNameSchema.MakeJobName(jobNameMap)
		if err != nil {
			glog.Fatal(err)
		}
		return name
	} else if parts["role"] == "BuildStats" {
		return strings.Replace(jobName, "BuildStats", "Build", 1)
	} else {
		return jobName
	}
}
432
433// swarmDimensions generates swarming bot dimensions for the given task.
434func swarmDimensions(parts map[string]string) []string {
435	if alternateSwarmDimensions != nil {
436		return alternateSwarmDimensions(parts)
437	}
438	return defaultSwarmDimensions(parts)
439}
440
// defaultSwarmDimensions generates default swarming bot dimensions for the
// given task.
//
// The dimensions are derived from the task-name parts: the "os" part is
// mapped to a Swarming os dimension; Test/Perf/Calmbench tasks additionally
// get device/CPU/GPU dimensions from the "model" / "cpu_or_gpu_value" parts;
// all other roles run on CPU-only machines. Unknown mapping entries are
// fatal, so adding a new OS/model/GPU requires updating the tables here.
func defaultSwarmDimensions(parts map[string]string) []string {
	d := map[string]string{
		"pool": CONFIG.Pool,
	}
	if os, ok := parts["os"]; ok {
		// Map the job-name OS part to the Swarming "os" dimension value.
		d["os"], ok = map[string]string{
			"Android":    "Android",
			"Chromecast": "Android",
			"ChromeOS":   "ChromeOS",
			"Debian9":    DEFAULT_OS_DEBIAN,
			"Mac":        DEFAULT_OS_MAC,
			"Mac10.13":   "Mac-10.13",
			"Mac10.14":   "Mac-10.14",
			"Ubuntu18":   "Ubuntu",
			"Win":        DEFAULT_OS_WIN,
			"Win10":      "Windows-10",
			"Win2016":    DEFAULT_OS_WIN,
			"Win7":       "Windows-7-SP1",
			"Win8":       "Windows-8.1-SP0",
			"iOS":        "iOS",
		}[os]
		if !ok {
			glog.Fatalf("Entry %q not found in OS mapping.", os)
		}
	} else {
		d["os"] = DEFAULT_OS_DEBIAN
	}
	if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
		if strings.Contains(parts["os"], "Android") || strings.Contains(parts["os"], "Chromecast") {
			// For Android, the device type is a better dimension
			// than CPU or GPU.
			deviceInfo, ok := map[string][]string{
				"AndroidOne":      {"sprout", "MOB30Q"},
				"Chorizo":         {"chorizo", "1.30_109591"},
				"GalaxyS6":        {"zerofltetmo", "NRD90M_G920TUVU5FQK1"},
				"GalaxyS7_G930FD": {"herolte", "R16NW_G930FXXS2ERH6"}, // This is Oreo.
				"GalaxyS9":        {"starlte", "R16NW_G960FXXU2BRJ8"}, // This is Oreo.
				"MotoG4":          {"athene", "NPJS25.93-14.7-8"},
				"NVIDIA_Shield":   {"foster", "OPR6.170623.010"},
				"Nexus5":          {"hammerhead", "M4B30Z_3437181"},
				"Nexus5x":         {"bullhead", "OPR6.170623.023"},
				"Nexus7":          {"grouper", "LMY47V_1836172"}, // 2012 Nexus 7
				"NexusPlayer":     {"fugu", "OPR2.170623.027"},
				"Pixel":           {"sailfish", "PPR1.180610.009"},
				"Pixel2XL":        {"taimen", "PPR1.180610.009"},
			}[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in Android mapping.", parts["model"])
			}
			d["device_type"] = deviceInfo[0]
			// Ignore device_os on branches. d["device_os"] = deviceInfo[1]
		} else if strings.Contains(parts["os"], "iOS") {
			// iOS devices are selected by hardware identifier.
			device, ok := map[string]string{
				"iPadMini4": "iPad5,1",
				"iPhone6":   "iPhone7,2",
				"iPhone7":   "iPhone9,1",
				"iPadPro":   "iPad6,3",
			}[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in iOS mapping.", parts["model"])
			}
			d["device"] = device
		} else if strings.Contains(parts["extra_config"], "SwiftShader") {
			// SwiftShader is a software renderer, so run on a small GCE VM.
			if parts["model"] != "GCE" || d["os"] != DEFAULT_OS_DEBIAN || parts["cpu_or_gpu_value"] != "SwiftShader" {
				glog.Fatalf("Please update defaultSwarmDimensions for SwiftShader %s %s %s.", parts["os"], parts["model"], parts["cpu_or_gpu_value"])
			}
			d["cpu"] = "x86-64-Haswell_GCE"
			d["os"] = DEFAULT_OS_LINUX_GCE
			d["machine_type"] = MACHINE_TYPE_SMALL
		} else if strings.Contains(parts["extra_config"], "SKQP") && parts["cpu_or_gpu_value"] == "Emulator" {
			if parts["model"] != "NUC7i5BNK" || d["os"] != DEFAULT_OS_DEBIAN {
				glog.Fatalf("Please update defaultSwarmDimensions for SKQP::Emulator %s %s.", parts["os"], parts["model"])
			}
			d["cpu"] = "x86-64-i5-7260U"
			d["os"] = DEFAULT_OS_DEBIAN
			// KVM means Kernel-based Virtual Machine, that is, can this vm virtualize commands
			// For us, this means, can we run an x86 android emulator on it.
			// kjlubick tried running this on GCE, but it was a bit too slow on the large install.
			// So, we run on bare metal machines in the Skolo (that should also have KVM).
			d["kvm"] = "1"
			d["docker_installed"] = "true"
		} else if parts["cpu_or_gpu"] == "CPU" {
			// CPU tasks: map CPU family -> model -> exact "cpu" dimension.
			modelMapping, ok := map[string]map[string]string{
				"AVX": {
					"Golo": "x86-64-E5-2670",
				},
				"AVX2": {
					"GCE":            "x86-64-Haswell_GCE",
					"MacBookAir7.2":  "x86-64-i5-5350U",
					"MacBookPro11.5": "x86-64-i7-4870HQ",
					"NUC5i7RYH":      "x86-64-i7-5557U",
				},
				"AVX512": {
					"GCE": "x86-64-Skylake_GCE",
				},
			}[parts["cpu_or_gpu_value"]]
			if !ok {
				glog.Fatalf("Entry %q not found in CPU mapping.", parts["cpu_or_gpu_value"])
			}
			cpu, ok := modelMapping[parts["model"]]
			if !ok {
				glog.Fatalf("Entry %q not found in %q model mapping.", parts["model"], parts["cpu_or_gpu_value"])
			}
			d["cpu"] = cpu
			if parts["model"] == "GCE" && d["os"] == DEFAULT_OS_DEBIAN {
				d["os"] = DEFAULT_OS_LINUX_GCE
			}
			if parts["model"] == "GCE" && d["cpu"] == "x86-64-Haswell_GCE" {
				d["machine_type"] = MACHINE_TYPE_MEDIUM
			}
		} else {
			// GPU tasks: map the GPU name to a vendor:device PCI ID (or a
			// ChromeOS release version) per OS family.
			if strings.Contains(parts["extra_config"], "CanvasKit") {
				// GPU is defined for the WebGL version of CanvasKit, but
				// it can still run on a GCE instance.
				return dockerGceDimensions()
			} else if strings.Contains(parts["os"], "Win") {
				gpu, ok := map[string]string{
					"GT610":         "10de:104a",
					"GTX660":        "10de:11c0",
					"GTX960":        "10de:1401",
					"IntelHD4400":   "8086:0a16",
					"IntelIris540":  "8086:1926",
					"IntelIris6100": "8086:162b",
					"IntelIris655":  "8086:3ea5",
					"RadeonHD7770":  "1002:683d",
					"RadeonR9M470X": "1002:6646",
					"QuadroP400":    "10de:1cb3",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Win GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
			} else if strings.Contains(parts["os"], "Ubuntu") || strings.Contains(parts["os"], "Debian") {
				gpu, ok := map[string]string{
					// Intel drivers come from CIPD, so no need to specify the version here.
					"IntelBayTrail": "8086:0f31",
					"IntelHD2000":   "8086:0102",
					"IntelHD405":    "8086:22b1",
					"IntelIris640":  "8086:5926",
					"QuadroP400":    "10de:1cb3",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Ubuntu GPU mapping.", parts["cpu_or_gpu_value"])
				}
				if parts["os"] == "Ubuntu18" && parts["cpu_or_gpu_value"] == "QuadroP400" {
					// Ubuntu18 has a newer GPU driver.
					gpu = "10de:1cb3"
				}
				d["gpu"] = gpu
			} else if strings.Contains(parts["os"], "Mac") {
				gpu, ok := map[string]string{
					"IntelHD6000":   "8086:1626",
					"IntelHD615":    "8086:591e",
					"IntelIris5100": "8086:0a2e",
					"RadeonHD8870M": "1002:6821",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in Mac GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = gpu
				// Yuck. We have two different types of MacMini7,1 with the same GPU but different CPUs.
				if parts["cpu_or_gpu_value"] == "IntelIris5100" {
					// Run all tasks on Golo machines for now.
					d["cpu"] = "x86-64-i7-4578U"
				}
			} else if strings.Contains(parts["os"], "ChromeOS") {
				version, ok := map[string]string{
					"MaliT604":           "10575.22.0",
					"MaliT764":           "10575.22.0",
					"MaliT860":           "10575.22.0",
					"PowerVRGX6250":      "10575.22.0",
					"TegraK1":            "10575.22.0",
					"IntelHDGraphics615": "10575.22.0",
				}[parts["cpu_or_gpu_value"]]
				if !ok {
					glog.Fatalf("Entry %q not found in ChromeOS GPU mapping.", parts["cpu_or_gpu_value"])
				}
				d["gpu"] = parts["cpu_or_gpu_value"]
				d["release_version"] = version
			} else {
				glog.Fatalf("Unknown GPU mapping for OS %q.", parts["os"])
			}
		}
	} else {
		// Non-Test/Perf/Calmbench roles (Build, Housekeeper, ...) run on
		// CPU-only machines.
		d["gpu"] = "none"
		if d["os"] == DEFAULT_OS_DEBIAN {
			if strings.Contains(parts["extra_config"], "PathKit") || strings.Contains(parts["extra_config"], "CanvasKit") || strings.Contains(parts["extra_config"], "CMake") {
				return dockerGceDimensions()
			}
			if parts["role"] == "BuildStats" {
				// Doesn't require a lot of resources, but some steps require docker
				return dockerGceDimensions()
			}
			// Use many-core machines for Build tasks.
			return linuxGceDimensions(MACHINE_TYPE_LARGE)
		} else if d["os"] == DEFAULT_OS_WIN {
			// Windows CPU bots.
			d["cpu"] = "x86-64-Haswell_GCE"
			// Use many-core machines for Build tasks.
			d["machine_type"] = MACHINE_TYPE_LARGE
		} else if d["os"] == DEFAULT_OS_MAC {
			// Mac CPU bots.
			d["cpu"] = "x86-64-E5-2697_v2"
		}
	}

	// Flatten the dimension map into sorted "key:value" strings.
	rv := make([]string, 0, len(d))
	for k, v := range d {
		rv = append(rv, fmt.Sprintf("%s:%s", k, v))
	}
	sort.Strings(rv)
	return rv
}
655
656// relpath returns the relative path to the given file from the config file.
657func relpath(f string) string {
658	_, filename, _, _ := runtime.Caller(0)
659	dir := path.Dir(filename)
660	rel := dir
661	if *cfgFile != "" {
662		rel = path.Dir(*cfgFile)
663	}
664	rv, err := filepath.Rel(rel, path.Join(dir, f))
665	if err != nil {
666		sklog.Fatal(err)
667	}
668	return rv
669}
670
671// bundleRecipes generates the task to bundle and isolate the recipes.
672func bundleRecipes(b *specs.TasksCfgBuilder) string {
673	pkgs := append([]*specs.CipdPackage{}, CIPD_PKGS_GIT...)
674	pkgs = append(pkgs, CIPD_PKGS_PYTHON...)
675	b.MustAddTask(BUNDLE_RECIPES_NAME, &specs.TaskSpec{
676		CipdPackages: pkgs,
677		Command: []string{
678			"/bin/bash", "skia/infra/bots/bundle_recipes.sh", specs.PLACEHOLDER_ISOLATED_OUTDIR,
679		},
680		Dimensions: linuxGceDimensions(MACHINE_TYPE_SMALL),
681		EnvPrefixes: map[string][]string{
682			"PATH": []string{"cipd_bin_packages", "cipd_bin_packages/bin"},
683		},
684		Isolate: relpath("swarm_recipe.isolate"),
685	})
686	return BUNDLE_RECIPES_NAME
687}
688
// isolateAssetCfg describes how to isolate a single CIPD asset.
type isolateAssetCfg struct {
	cipdPkg string // CIPD asset name, resolved via MustGetCipdPackageFromAsset.
	path    string // Path the asset is unpacked to and copied from.
}
693
// ISOLATE_ASSET_MAPPING maps each Isolate-* task name to the CIPD asset it
// isolates.
var ISOLATE_ASSET_MAPPING = map[string]isolateAssetCfg{
	ISOLATE_GCLOUD_LINUX_NAME: {
		cipdPkg: "gcloud_linux",
		path:    "gcloud_linux",
	},
	ISOLATE_GO_DEPS_NAME: {
		cipdPkg: "go_deps",
		path:    "go_deps",
	},
	ISOLATE_SKIMAGE_NAME: {
		cipdPkg: "skimage",
		path:    "skimage",
	},
	ISOLATE_SKP_NAME: {
		cipdPkg: "skp",
		path:    "skp",
	},
	ISOLATE_SVG_NAME: {
		cipdPkg: "svg",
		path:    "svg",
	},
	ISOLATE_NDK_LINUX_NAME: {
		cipdPkg: "android_ndk_linux",
		path:    "android_ndk_linux",
	},
	ISOLATE_SDK_LINUX_NAME: {
		cipdPkg: "android_sdk_linux",
		path:    "android_sdk_linux",
	},
	ISOLATE_WIN_TOOLCHAIN_NAME: {
		cipdPkg: "win_toolchain",
		path:    "win_toolchain",
	},
}
728
729// isolateCIPDAsset generates a task to isolate the given CIPD asset.
730func isolateCIPDAsset(b *specs.TasksCfgBuilder, name string) string {
731	asset := ISOLATE_ASSET_MAPPING[name]
732	b.MustAddTask(name, &specs.TaskSpec{
733		CipdPackages: []*specs.CipdPackage{
734			b.MustGetCipdPackageFromAsset(asset.cipdPkg),
735		},
736		Command:    []string{"/bin/cp", "-rL", asset.path, "${ISOLATED_OUTDIR}"},
737		Dimensions: linuxGceDimensions(MACHINE_TYPE_SMALL),
738		Isolate:    relpath("empty.isolate"),
739	})
740	return name
741}
742
743// getIsolatedCIPDDeps returns the slice of Isolate_* tasks a given task needs.
744// This allows us to  save time on I/O bound bots, like the RPIs.
745func getIsolatedCIPDDeps(parts map[string]string) []string {
746	deps := []string{}
747	// Only do this on the RPIs for now. Other, faster machines shouldn't see much
748	// benefit and we don't need the extra complexity, for now
749	rpiOS := []string{"Android", "ChromeOS", "iOS"}
750
751	if o := parts["os"]; strings.Contains(o, "Chromecast") {
752		// Chromecasts don't have enough disk space to fit all of the content,
753		// so we do a subset of the skps.
754		deps = append(deps, ISOLATE_SKP_NAME)
755	} else if e := parts["extra_config"]; strings.Contains(e, "Skpbench") {
756		// Skpbench only needs skps
757		deps = append(deps, ISOLATE_SKP_NAME)
758	} else if util.In(o, rpiOS) {
759		deps = append(deps, ISOLATE_SKP_NAME)
760		deps = append(deps, ISOLATE_SVG_NAME)
761		deps = append(deps, ISOLATE_SKIMAGE_NAME)
762	}
763
764	return deps
765}
766
767// usesGit adds attributes to tasks which use git.
768func usesGit(t *specs.TaskSpec, name string) {
769	t.Caches = append(t.Caches, CACHES_GIT...)
770	if !strings.Contains(name, "NoDEPS") {
771		t.Caches = append(t.Caches, CACHES_WORKDIR...)
772	}
773	t.CipdPackages = append(t.CipdPackages, CIPD_PKGS_GIT...)
774}
775
776// usesGo adds attributes to tasks which use go. Recipes should use
777// "with api.context(env=api.infra.go_env)".
778// (Not needed for tasks that just want to run Go code from the infra repo -- instead use go_deps.)
779func usesGo(b *specs.TasksCfgBuilder, t *specs.TaskSpec) {
780	t.Caches = append(t.Caches, CACHES_GO...)
781	t.CipdPackages = append(t.CipdPackages, b.MustGetCipdPackageFromAsset("go"))
782	t.Dependencies = append(t.Dependencies, isolateCIPDAsset(b, ISOLATE_GO_DEPS_NAME))
783}
784
785// usesDocker adds attributes to tasks which use docker.
786func usesDocker(t *specs.TaskSpec, name string) {
787	if strings.Contains(name, "EMCC") || strings.Contains(name, "SKQP") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "CMake") {
788		t.Caches = append(t.Caches, CACHES_DOCKER...)
789	}
790}
791
792// timeout sets the timeout(s) for this task.
793func timeout(task *specs.TaskSpec, timeout time.Duration) {
794	task.ExecutionTimeout = timeout
795	task.IoTimeout = timeout // With kitchen, step logs don't count toward IoTimeout.
796}
797
// attempts returns the desired MaxAttempts for the named task.
func attempts(name string) int {
	if strings.Contains(name, "Android_Framework") {
		// The reason for this has been lost to time.
		return 1
	}
	isBuildOrUpload := strings.HasPrefix(name, "Build-") || strings.HasPrefix(name, "Upload-")
	if !isBuildOrUpload {
		for _, sanitizer := range []string{"ASAN", "MSAN", "TSAN", "UBSAN", "Valgrind"} {
			if strings.Contains(name, sanitizer) {
				// Sanitizers often find non-deterministic issues that retries would hide.
				return 1
			}
		}
	}
	// Retry by default to hide random bot/hardware failures.
	return 2
}
815
// compile generates a compile task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
//
// The task's CIPD packages and dependencies vary by target platform
// (Android/Chromecast/Chromebook/Debian/Win/Mac), compiler, and extra_config.
func compile(b *specs.TasksCfgBuilder, name string, parts map[string]string) string {
	task := kitchenTask(name, "compile", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, swarmDimensions(parts), nil, OUTPUT_BUILD)
	usesGit(task, name)
	usesDocker(task, name)

	// Android bots require a toolchain.
	if strings.Contains(name, "Android") {
		if parts["extra_config"] == "Android_Framework" {
			// Do not need a toolchain when building the
			// Android Framework.
		} else if strings.Contains(name, "Mac") {
			// Cross-compiling from Mac uses the darwin NDK package.
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("android_ndk_darwin"))
		} else if strings.Contains(name, "Win") {
			// Cross-compiling from Windows: install the NDK at the very
			// short path "n" — presumably to avoid Windows path-length
			// limits; confirm against the compile recipe.
			pkg := b.MustGetCipdPackageFromAsset("android_ndk_windows")
			pkg.Path = "n"
			task.CipdPackages = append(task.CipdPackages, pkg)
		} else if !strings.Contains(name, "SKQP") {
			// Linux (non-SKQP) builds depend on the isolated NDK task
			// rather than pulling the CIPD package directly.
			task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_NDK_LINUX_NAME))
		}
	} else if strings.Contains(name, "Chromecast") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("cast_toolchain"))
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
	} else if strings.Contains(name, "Chromebook") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_linux"))
		if parts["target_arch"] == "x86_64" {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_x86_64_gles"))
		} else if parts["target_arch"] == "arm" {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("armhf_sysroot"))
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
		}
	} else if strings.Contains(name, "Debian") {
		if strings.Contains(name, "Clang") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if parts["target_arch"] == "mips64el" || parts["target_arch"] == "loongson3a" {
			// The only toolchain available for these architectures is GCC;
			// fail fast if the job name claims otherwise.
			if parts["compiler"] != "GCC" {
				glog.Fatalf("mips64el toolchain is GCC, but compiler is %q in %q", parts["compiler"], name)
			}
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("mips64el_toolchain_linux"))
		}
		if strings.Contains(name, "SwiftShader") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("cmake_linux"))
		}
		if strings.Contains(name, "OpenCL") {
			task.CipdPackages = append(task.CipdPackages,
				b.MustGetCipdPackageFromAsset("opencl_headers"),
				b.MustGetCipdPackageFromAsset("opencl_ocl_icd_linux"),
			)
		}
	} else if strings.Contains(name, "Win") {
		task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_WIN_TOOLCHAIN_NAME))
		if strings.Contains(name, "Clang") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_win"))
		}
		if strings.Contains(name, "OpenCL") {
			task.CipdPackages = append(task.CipdPackages,
				b.MustGetCipdPackageFromAsset("opencl_headers"),
			)
		}
	} else if strings.Contains(name, "Mac") {
		task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_XCODE...)
		// Cache the Xcode.app install between runs.
		task.Caches = append(task.Caches, &specs.Cache{
			Name: "xcode",
			Path: "cache/Xcode.app",
		})
		if strings.Contains(name, "CommandBuffer") {
			timeout(task, 2*time.Hour)
		}
		if strings.Contains(name, "MoltenVK") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("moltenvk"))
		}
	}

	// Add the task.
	b.MustAddTask(name, task)

	// All compile tasks are runnable as their own Job. Assert that the Job
	// is listed in JOBS.
	if !util.In(name, JOBS) {
		glog.Fatalf("Job %q is missing from the JOBS list!", name)
	}

	// Upload the skiaserve binary only for Linux Android compile bots.
	// See skbug.com/7399 for context.
	if parts["configuration"] == "Release" &&
		parts["extra_config"] == "Android" &&
		!strings.Contains(parts["os"], "Win") &&
		!strings.Contains(parts["os"], "Mac") {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		task := kitchenTask(uploadName, "upload_skiaserve", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_BINARY, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
		task.Dependencies = append(task.Dependencies, name)
		b.MustAddTask(uploadName, task)
		// The upload task becomes the leaf of the chain.
		return uploadName
	}

	return name
}
915
916// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
917// task in the generated chain of tasks, which the Job should add as a
918// dependency.
919func recreateSKPs(b *specs.TasksCfgBuilder, name string) string {
920	dims := []string{
921		"pool:SkiaCT",
922		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
923	}
924	task := kitchenTask(name, "recreate_skps", "swarm_recipe.isolate", SERVICE_ACCOUNT_RECREATE_SKPS, dims, nil, OUTPUT_NONE)
925	task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GIT...)
926	usesGo(b, task)
927	timeout(task, 4*time.Hour)
928	b.MustAddTask(name, task)
929	return name
930}
931
932// updateGoDEPS generates an UpdateGoDEPS task. Returns the name of the last
933// task in the generated chain of tasks, which the Job should add as a
934// dependency.
935func updateGoDEPS(b *specs.TasksCfgBuilder, name string) string {
936	dims := linuxGceDimensions(MACHINE_TYPE_LARGE)
937	task := kitchenTask(name, "update_go_deps", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPDATE_GO_DEPS, dims, nil, OUTPUT_NONE)
938	task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GIT...)
939	usesGo(b, task)
940	b.MustAddTask(name, task)
941	return name
942}
943
944// checkGeneratedFiles verifies that no generated SKSL files have been edited
945// by hand.
946func checkGeneratedFiles(b *specs.TasksCfgBuilder, name string) string {
947	task := kitchenTask(name, "check_generated_files", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_LARGE), nil, OUTPUT_NONE)
948	task.Caches = append(task.Caches, CACHES_WORKDIR...)
949	b.MustAddTask(name, task)
950	return name
951}
952
953// housekeeper generates a Housekeeper task. Returns the name of the last task
954// in the generated chain of tasks, which the Job should add as a dependency.
955func housekeeper(b *specs.TasksCfgBuilder, name string) string {
956	task := kitchenTask(name, "housekeeper", "swarm_recipe.isolate", SERVICE_ACCOUNT_HOUSEKEEPER, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
957	usesGit(task, name)
958	b.MustAddTask(name, task)
959	return name
960}
961
962// androidFrameworkCompile generates an Android Framework Compile task. Returns
963// the name of the last task in the generated chain of tasks, which the Job
964// should add as a dependency.
965func androidFrameworkCompile(b *specs.TasksCfgBuilder, name string) string {
966	task := kitchenTask(name, "android_compile", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
967	timeout(task, time.Hour)
968	b.MustAddTask(name, task)
969	return name
970}
971
972// infra generates an infra_tests task. Returns the name of the last task in the
973// generated chain of tasks, which the Job should add as a dependency.
974func infra(b *specs.TasksCfgBuilder, name string) string {
975	task := kitchenTask(name, "infra", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
976	usesGit(task, name)
977	usesGo(b, task)
978	b.MustAddTask(name, task)
979	return name
980}
981
// BUILD_STATS_NO_UPLOAD lists BuildStats jobs which should NOT upload their
// results even though their names contain "Release"; see buildstats().
var BUILD_STATS_NO_UPLOAD = []string{"BuildStats-Debian9-Clang-x86_64-Release"}
983
// buildstats generates a task which runs the compute_buildstats recipe
// (using bloaty) on the output of the given compile task. Returns the name of
// the last task in the generated chain of tasks, which the Job should add as
// a dependency.
func buildstats(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string) string {
	task := kitchenTask(name, "compute_buildstats", "swarm_recipe.isolate", "", swarmDimensions(parts), nil, OUTPUT_PERF)
	task.Dependencies = append(task.Dependencies, compileTaskName)
	// bloaty is the binary-size measurement tool used by the recipe.
	task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("bloaty"))
	b.MustAddTask(name, task)

	// Upload release results (for tracking in perf)
	// We have some jobs that are FYI (e.g. Debug-CanvasKit, tree-map generator)
	if strings.Contains(name, "Release") && !util.In(name, BUILD_STATS_NO_UPLOAD) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": CONFIG.GsBucketNano,
		}
		// NOTE(review): the upload task is created with the original task
		// name (matching test/perf/calmbench) but registered under
		// uploadName; compile() passes uploadName instead — presumably
		// kitchenTask's first argument feeds a recipe property. Confirm
		// which is intended.
		uploadTask := kitchenTask(name, "upload_buildstats_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_NANO, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		return uploadName
	}

	return name
}
1006
// getParentRevisionName returns the name of the compile task for the parent
// revision corresponding to the given compile task. The separator depends on
// whether the job already has an extra_config: "-ParentRevision" starts a new
// extra_config, while "_ParentRevision" extends an existing one.
func getParentRevisionName(compileTaskName string, parts map[string]string) string {
	suffix := "_ParentRevision"
	if parts["extra_config"] == "" {
		suffix = "-ParentRevision"
	}
	return compileTaskName + suffix
}
1014
// calmbench generates a calmbench task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
//
// The task depends on both the current-revision and parent-revision compile
// tasks (calmbench compares the two), plus the isolated SKP and SVG assets.
func calmbench(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName, compileParentName string) string {
	task := kitchenTask(name, "calmbench", "calmbench.isolate", "", swarmDimensions(parts), nil, OUTPUT_PERF)
	usesGit(task, name)
	task.Dependencies = append(task.Dependencies, compileTaskName, compileParentName, ISOLATE_SKP_NAME, ISOLATE_SVG_NAME)
	if parts["cpu_or_gpu_value"] == "QuadroP400" {
		// Specify "rack" dimension for consistent test results.
		// See https://bugs.chromium.org/p/chromium/issues/detail?id=784662&desc=2#c34
		// for more context.
		if parts["os"] == "Ubuntu18" {
			task.Dimensions = append(task.Dimensions, "rack:2")
		} else {
			task.Dimensions = append(task.Dimensions, "rack:1")
		}
	}
	b.MustAddTask(name, task)

	// Upload results if necessary.
	if strings.Contains(name, "Release") && doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": CONFIG.GsBucketCalm,
		}
		uploadTask := kitchenTask(name, "upload_calmbench_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_CALMBENCH, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		// The upload task becomes the leaf of the chain.
		return uploadName
	}

	return name
}
1048
1049// doUpload indicates whether the given Job should upload its results.
1050func doUpload(name string) bool {
1051	for _, s := range CONFIG.NoUpload {
1052		m, err := regexp.MatchString(s, name)
1053		if err != nil {
1054			glog.Fatal(err)
1055		}
1056		if m {
1057			return false
1058		}
1059	}
1060	return true
1061}
1062
// test generates a Test task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
//
// The recipe and isolate are chosen from the job name (SKQP, OpenCL, PathKit,
// CanvasKit, LottieWeb, or the default "test"); extra dependencies, timeouts,
// and dimensions are added for special configurations (Valgrind, sanitizers,
// SKQP, etc.), and an upload task is chained on when doUpload(name) is true.
func test(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Pick the recipe based on the job name.
	recipe := "test"
	if strings.Contains(name, "SKQP") {
		recipe = "skqp_test"
		if strings.Contains(name, "Emulator") {
			recipe = "test_skqp_emulator"
		}
	} else if strings.Contains(name, "OpenCL") {
		// TODO(dogben): Longer term we may not want this to be called a "Test" task, but until we start
		// running hs_bench or kx, it will be easier to fit into the current job name schema.
		recipe = "compute_test"
	} else if strings.Contains(name, "PathKit") {
		recipe = "test_pathkit"
	} else if strings.Contains(name, "CanvasKit") {
		recipe = "test_canvaskit"
	} else if strings.Contains(name, "LottieWeb") {
		recipe = "test_lottie_web"
	}
	extraProps := map[string]string{
		"gold_hashes_url": CONFIG.GoldHashesURL,
	}
	// Pass the internal hardware label through to the recipe, if any.
	iid := internalHardwareLabel(parts)
	if iid != nil {
		extraProps["internal_hardware_label"] = strconv.Itoa(*iid)
	}
	// Web/emulator tests only need the recipe isolate, not the bundled Skia
	// test inputs.
	isolate := "test_skia_bundled.isolate"
	if strings.Contains(name, "CanvasKit") || strings.Contains(name, "Emulator") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "PathKit") {
		isolate = "swarm_recipe.isolate"
	}
	task := kitchenTask(name, recipe, isolate, "", swarmDimensions(parts), extraProps, OUTPUT_TEST)
	task.CipdPackages = append(task.CipdPackages, pkgs...)
	if strings.Contains(name, "Lottie") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("lottie-samples"))
	}
	if !strings.Contains(name, "LottieWeb") {
		// Test.+LottieWeb doesn't require anything in Skia to be compiled.
		task.Dependencies = append(task.Dependencies, compileTaskName)
	}

	if strings.Contains(name, "Android_ASAN") {
		task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_NDK_LINUX_NAME))
	}
	if strings.Contains(name, "SKQP") {
		if !strings.Contains(name, "Emulator") {
			task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_GCLOUD_LINUX_NAME))
		}
	}
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		task.Dependencies = append(task.Dependencies, deps...)
	}
	task.Expiration = 20 * time.Hour

	// Default timeout; lengthened below for slow configurations.
	timeout(task, 4*time.Hour)
	if strings.Contains(parts["extra_config"], "Valgrind") {
		timeout(task, 9*time.Hour)
		task.Expiration = 48 * time.Hour
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		// Since Valgrind runs on the same bots as the CQ, we restrict Valgrind to a subset of the bots
		// to ensure there are always bots free for CQ tasks.
		task.Dimensions = append(task.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		timeout(task, 9*time.Hour)
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		timeout(task, 6*time.Hour)
	}
	b.MustAddTask(name, task)

	// Upload results if necessary. TODO(kjlubick): If we do coverage analysis at the same
	// time as normal tests (which would be nice), cfg.json needs to have Coverage removed.
	if doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": CONFIG.GsBucketGm,
		}
		uploadTask := kitchenTask(name, "upload_dm_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_GM, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		// The upload task becomes the leaf of the chain.
		return uploadName
	}

	return name
}
1149
// perf generates a Perf task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
//
// The recipe/isolate are chosen from the job name (Skpbench, PathKit,
// CanvasKit, or the default "perf"); timeouts and dimensions are adjusted for
// slow configurations, and an upload task is chained on for Release jobs.
func perf(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	recipe := "perf"
	isolate := relpath("perf_skia_bundled.isolate")
	if strings.Contains(parts["extra_config"], "Skpbench") {
		recipe = "skpbench"
		isolate = relpath("skpbench_skia_bundled.isolate")
	} else if strings.Contains(name, "PathKit") {
		recipe = "perf_pathkit"
	} else if strings.Contains(name, "CanvasKit") {
		recipe = "perf_canvaskit"
	}
	task := kitchenTask(name, recipe, isolate, "", swarmDimensions(parts), nil, OUTPUT_PERF)
	task.CipdPackages = append(task.CipdPackages, pkgs...)
	task.Dependencies = append(task.Dependencies, compileTaskName)
	task.Expiration = 20 * time.Hour
	// Default timeout; lengthened below for slow configurations.
	timeout(task, 4*time.Hour)
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		task.Dependencies = append(task.Dependencies, deps...)
	}

	if strings.Contains(parts["extra_config"], "Valgrind") {
		timeout(task, 9*time.Hour)
		task.Expiration = 48 * time.Hour
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		// Since Valgrind runs on the same bots as the CQ, we restrict Valgrind to a subset of the bots
		// to ensure there are always bots free for CQ tasks.
		task.Dimensions = append(task.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		timeout(task, 9*time.Hour)
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		timeout(task, 6*time.Hour)
	}
	// Unlike test(), the internal hardware label is passed on the command
	// line rather than as a recipe property.
	iid := internalHardwareLabel(parts)
	if iid != nil {
		task.Command = append(task.Command, fmt.Sprintf("internal_hardware_label=%d", *iid))
	}
	if parts["cpu_or_gpu_value"] == "QuadroP400" {
		// Specify "rack" dimension for consistent test results.
		// See https://bugs.chromium.org/p/chromium/issues/detail?id=784662&desc=2#c34
		// for more context.
		if parts["os"] == "Ubuntu18" {
			task.Dimensions = append(task.Dimensions, "rack:2")
		} else {
			task.Dimensions = append(task.Dimensions, "rack:1")
		}
	}
	b.MustAddTask(name, task)

	// Upload results if necessary.
	if strings.Contains(name, "Release") && doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": CONFIG.GsBucketNano,
		}
		uploadTask := kitchenTask(name, "upload_nano_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_NANO, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		// The upload task becomes the leaf of the chain.
		return uploadName
	}
	return name
}
1215
1216// Run the presubmit.
1217func presubmit(b *specs.TasksCfgBuilder, name string) string {
1218	extraProps := map[string]string{
1219		"category":         "cq",
1220		"patch_gerrit_url": "https://skia-review.googlesource.com",
1221		"patch_project":    "skia",
1222		"patch_ref":        specs.PLACEHOLDER_PATCH_REF,
1223		"reason":           "CQ",
1224		"repo_name":        "skia",
1225	}
1226	// Use MACHINE_TYPE_LARGE because it seems to save time versus MEDIUM and we want presubmit to be
1227	// fast.
1228	task := kitchenTask(name, "run_presubmit", "empty.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_LARGE), extraProps, OUTPUT_NONE)
1229	usesGit(task, name)
1230	task.CipdPackages = append(task.CipdPackages, &specs.CipdPackage{
1231		Name:    "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build",
1232		Path:    "recipe_bundle",
1233		Version: "refs/heads/master",
1234	})
1235	task.Dependencies = []string{} // No bundled recipes for this one.
1236	b.MustAddTask(name, task)
1237	return name
1238}
1239
// process generates tasks and jobs for the given job name.
//
// It dispatches on the job's role (Build, Test, Perf, Calmbench, BuildStats,
// Housekeeper-*), creates any compile tasks the job depends on, accumulates
// the job's task dependencies into deps, and finally registers a JobSpec
// whose trigger is derived from the job name.
func process(b *specs.TasksCfgBuilder, name string) {
	var priority float64 // Leave as default for most jobs.
	deps := []string{}

	// Bundle Recipes.
	if name == BUNDLE_RECIPES_NAME {
		deps = append(deps, bundleRecipes(b))
	}

	// Isolate CIPD assets.
	if _, ok := ISOLATE_ASSET_MAPPING[name]; ok {
		deps = append(deps, isolateCIPDAsset(b, name))
	}

	// Decompose the job name into its schema parts (role, os, compiler, ...).
	parts, err := jobNameSchema.ParseJobName(name)
	if err != nil {
		glog.Fatal(err)
	}

	// RecreateSKPs.
	if strings.Contains(name, "RecreateSKPs") {
		deps = append(deps, recreateSKPs(b, name))
	}

	// Update Go DEPS.
	if strings.Contains(name, "UpdateGoDEPS") {
		deps = append(deps, updateGoDEPS(b, name))
	}

	// Infra tests.
	if name == "Housekeeper-PerCommit-InfraTests" {
		deps = append(deps, infra(b, name))
	}

	// Compile bots.
	if parts["role"] == "Build" {
		if parts["extra_config"] == "Android_Framework" {
			// Android Framework compile tasks use a different recipe.
			deps = append(deps, androidFrameworkCompile(b, name))
		} else {
			deps = append(deps, compile(b, name, parts))
		}
	}

	// Most remaining bots need a compile task.
	compileTaskName := deriveCompileTaskName(name, parts)
	compileTaskParts, err := jobNameSchema.ParseJobName(compileTaskName)
	if err != nil {
		glog.Fatal(err)
	}
	// Calmbench also needs the parent-revision compile task for comparison.
	compileParentName := getParentRevisionName(compileTaskName, compileTaskParts)
	compileParentParts, err := jobNameSchema.ParseJobName(compileParentName)
	if err != nil {
		glog.Fatal(err)
	}

	// These bots do not need a compile task.
	if parts["role"] != "Build" &&
		name != "Housekeeper-Nightly-UpdateGoDEPS" &&
		name != "Housekeeper-PerCommit-BundleRecipes" &&
		name != "Housekeeper-PerCommit-InfraTests" &&
		name != "Housekeeper-PerCommit-CheckGeneratedFiles" &&
		name != "Housekeeper-OnDemand-Presubmit" &&
		name != "Housekeeper-PerCommit" &&
		!strings.Contains(name, "Android_Framework") &&
		!strings.Contains(name, "RecreateSKPs") &&
		!strings.Contains(name, "Housekeeper-PerCommit-Isolate") &&
		!strings.Contains(name, "LottieWeb") {
		compile(b, compileTaskName, compileTaskParts)
		if parts["role"] == "Calmbench" {
			compile(b, compileParentName, compileParentParts)
		}
	}

	// Housekeepers.
	if name == "Housekeeper-PerCommit" {
		deps = append(deps, housekeeper(b, name))
	}
	if name == "Housekeeper-PerCommit-CheckGeneratedFiles" {
		deps = append(deps, checkGeneratedFiles(b, name))
	}
	if name == "Housekeeper-OnDemand-Presubmit" {
		// Presubmit runs on the CQ, so give it a high priority.
		priority = 1
		deps = append(deps, presubmit(b, name))
	}

	// Common assets needed by the remaining bots.

	pkgs := []*specs.CipdPackage{}

	// Only pull the common assets directly from CIPD when no isolated
	// versions of them are available for this configuration. (Note that the
	// local `deps` here shadows the outer deps slice on purpose.)
	if deps := getIsolatedCIPDDeps(parts); len(deps) == 0 {
		pkgs = []*specs.CipdPackage{
			b.MustGetCipdPackageFromAsset("skimage"),
			b.MustGetCipdPackageFromAsset("skp"),
			b.MustGetCipdPackageFromAsset("svg"),
		}
	}

	if strings.Contains(name, "Ubuntu") || strings.Contains(name, "Debian") {
		if strings.Contains(name, "SAN") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_sdk"))
		}
		if strings.Contains(name, "Intel") && strings.Contains(name, "GPU") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("mesa_intel_driver_linux"))
		}
		if strings.Contains(name, "OpenCL") {
			pkgs = append(pkgs,
				b.MustGetCipdPackageFromAsset("opencl_ocl_icd_linux"),
				b.MustGetCipdPackageFromAsset("opencl_intel_neo_linux"),
			)
		}
	}
	if strings.Contains(name, "ProcDump") {
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("procdump_win"))
	}
	if strings.Contains(name, "CanvasKit") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "PathKit") {
		// Docker-based tests that don't need the standard CIPD assets
		pkgs = []*specs.CipdPackage{}
	}

	// Test bots.
	if parts["role"] == "Test" {
		deps = append(deps, test(b, name, parts, compileTaskName, pkgs))
	}

	// Perf bots.
	if parts["role"] == "Perf" {
		deps = append(deps, perf(b, name, parts, compileTaskName, pkgs))
	}

	// Calmbench bots.
	if parts["role"] == "Calmbench" {
		deps = append(deps, calmbench(b, name, parts, compileTaskName, compileParentName))
	}

	// Valgrind runs at a low priority so that it doesn't occupy all the bots.
	if strings.Contains(name, "Valgrind") {
		// Priority of 0.085 should result in Valgrind tasks with a blamelist of ~10 commits having the
		// same score as other tasks with a blamelist of 1 commit, when we have insufficient bot
		// capacity to run more frequently.
		priority = 0.085
	}

	// BuildStats bots. This computes things like binary size.
	if parts["role"] == "BuildStats" {
		deps = append(deps, buildstats(b, name, parts, compileTaskName))
	}

	// Add the Job spec.
	j := &specs.JobSpec{
		Priority:  priority,
		TaskSpecs: deps,
		Trigger:   specs.TRIGGER_ANY_BRANCH,
	}
	// The job name determines how it is triggered.
	if strings.Contains(name, "-Nightly-") {
		j.Trigger = specs.TRIGGER_NIGHTLY
	} else if strings.Contains(name, "-Weekly-") {
		j.Trigger = specs.TRIGGER_WEEKLY
	} else if strings.Contains(name, "Flutter") || strings.Contains(name, "CommandBuffer") {
		j.Trigger = specs.TRIGGER_MASTER_ONLY
	} else if strings.Contains(name, "-OnDemand-") || strings.Contains(name, "Android_Framework") {
		j.Trigger = specs.TRIGGER_ON_DEMAND
	}
	b.MustAddJob(name, j)
}
1409
1410func loadJson(flag *string, defaultFlag string, val interface{}) {
1411	if *flag == "" {
1412		*flag = defaultFlag
1413	}
1414	b, err := ioutil.ReadFile(*flag)
1415	if err != nil {
1416		glog.Fatal(err)
1417	}
1418	if err := json.Unmarshal(b, val); err != nil {
1419		glog.Fatal(err)
1420	}
1421}
1422
// Regenerate the tasks.json file.
//
// Loads the job list and config from JSON, builds the JobNameSchema, then
// generates tasks and a job for every entry in JOBS.
func main() {
	b := specs.MustNewTasksCfgBuilder()
	b.SetAssetsDir(*assetsDir)
	infraBots := path.Join(b.CheckoutRoot(), "infra", "bots")

	// Load the jobs from a JSON file.
	loadJson(jobsFile, path.Join(infraBots, "jobs.json"), &JOBS)

	// Load general config information from a JSON file.
	loadJson(cfgFile, path.Join(infraBots, "cfg.json"), &CONFIG)

	// Create the JobNameSchema.
	if *builderNameSchemaFile == "" {
		*builderNameSchemaFile = path.Join(b.CheckoutRoot(), "infra", "bots", "recipe_modules", "builder_name_schema", "builder_name_schema.json")
	}
	schema, err := NewJobNameSchema(*builderNameSchemaFile)
	if err != nil {
		glog.Fatal(err)
	}
	jobNameSchema = schema

	// Create Tasks and Jobs.
	for _, name := range JOBS {
		process(b, name)
	}

	// Write out the generated tasks.json.
	b.MustFinish()
}
1452
// TODO(borenet): The below really belongs in its own file, probably next to the
// builder_name_schema.json file.

// schema is a sub-struct of JobNameSchema describing how a job name for one
// role decomposes into its component parts.
type schema struct {
	// Keys are the required name components for this role, in order.
	Keys         []string `json:"keys"`
	// OptionalKeys are components which may be omitted from the end, in order.
	OptionalKeys []string `json:"optional_keys"`
	// RecurseRoles are sub-role names whose schemas are applied recursively
	// after Keys and before OptionalKeys.
	RecurseRoles []string `json:"recurse_roles"`
}
1462
// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format. It is loaded from builder_name_schema.json.
type JobNameSchema struct {
	// Schema maps each role name to its sub-schema.
	Schema map[string]*schema `json:"builder_name_schema"`
	// Sep is the separator used between job name parts (e.g. "-").
	Sep    string             `json:"builder_name_sep"`
}
1469
1470// NewJobNameSchema returns a JobNameSchema instance based on the given JSON
1471// file.
1472func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
1473	var rv JobNameSchema
1474	f, err := os.Open(jsonFile)
1475	if err != nil {
1476		return nil, err
1477	}
1478	defer util.Close(f)
1479	if err := json.NewDecoder(f).Decode(&rv); err != nil {
1480		return nil, err
1481	}
1482	return &rv, nil
1483}
1484
// ParseJobName splits the given Job name into its component parts, according
// to the schema.
//
// The returned map always contains "role"; recursive sub-roles are stored
// under "sub-role-1", "sub-role-2", etc., and the remaining parts under the
// key names declared by each role's schema. An error is returned if the name
// has too few or too many parts, or names an unknown role.
func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
	// popFront removes and returns the first element of items, erroring if
	// the name has run out of parts.
	popFront := func(items []string) (string, []string, error) {
		if len(items) == 0 {
			return "", nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
		}
		return items[0], items[1:], nil
	}

	result := map[string]string{}

	// parse consumes the parts belonging to the given role (and, recursively,
	// any sub-roles), filling in result as it goes. It returns the leftover
	// parts, which must be empty at the top level.
	var parse func(int, string, []string) ([]string, error)
	parse = func(depth int, role string, parts []string) ([]string, error) {
		s, ok := s.Schema[role]
		if !ok {
			return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
		}
		if depth == 0 {
			result["role"] = role
		} else {
			// Nested roles are recorded as sub-role-1, sub-role-2, ...
			result[fmt.Sprintf("sub-role-%d", depth)] = role
		}
		var err error
		// Required keys are consumed first, in schema order.
		for _, key := range s.Keys {
			var value string
			value, parts, err = popFront(parts)
			if err != nil {
				return nil, err
			}
			result[key] = value
		}
		// Then recurse into any sub-role whose name matches the next part.
		for _, subRole := range s.RecurseRoles {
			if len(parts) > 0 && parts[0] == subRole {
				parts, err = parse(depth+1, parts[0], parts[1:])
				if err != nil {
					return nil, err
				}
			}
		}
		// Optional keys absorb any remaining parts, in schema order.
		for _, key := range s.OptionalKeys {
			if len(parts) > 0 {
				var value string
				value, parts, err = popFront(parts)
				if err != nil {
					return nil, err
				}
				result[key] = value
			}
		}
		if len(parts) > 0 {
			return nil, fmt.Errorf("Invalid job name: %s (too many parts)", n)
		}
		return parts, nil
	}

	split := strings.Split(n, s.Sep)
	if len(split) < 2 {
		return nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
	}
	// The first part is always the top-level role.
	role := split[0]
	split = split[1:]
	_, err := parse(0, role, split)
	return result, err
}
1550
// MakeJobName assembles the given parts of a Job name, according to the schema.
//
// This is the inverse of ParseJobName: parts must contain "role", the keys
// required by that role's schema, "sub-role-N" entries for any recursive
// roles, and optionally the schema's optional keys. An error is returned for
// missing, unknown, or leftover parts. The input map is not modified.
func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
	rvParts := make([]string, 0, len(parts))

	// process consumes the entries of parts belonging to the role at the
	// given depth (deleting them as it goes) and appends the corresponding
	// name components to rvParts. It returns the (shrinking) parts map.
	var process func(int, map[string]string) (map[string]string, error)
	process = func(depth int, parts map[string]string) (map[string]string, error) {
		// The role key mirrors ParseJobName: "role" at the top level,
		// "sub-role-N" below.
		roleKey := "role"
		if depth != 0 {
			roleKey = fmt.Sprintf("sub-role-%d", depth)
		}
		role, ok := parts[roleKey]
		if !ok {
			return nil, fmt.Errorf("Invalid job parts; missing key %q", roleKey)
		}

		s, ok := s.Schema[role]
		if !ok {
			return nil, fmt.Errorf("Invalid job parts; unknown role %q", role)
		}
		rvParts = append(rvParts, role)
		delete(parts, roleKey)

		// Required keys first, in schema order.
		for _, key := range s.Keys {
			value, ok := parts[key]
			if !ok {
				return nil, fmt.Errorf("Invalid job parts; missing %q", key)
			}
			rvParts = append(rvParts, value)
			delete(parts, key)
		}

		// Then the sub-role, which must be one of the schema's RecurseRoles.
		if len(s.RecurseRoles) > 0 {
			subRoleKey := fmt.Sprintf("sub-role-%d", depth+1)
			subRole, ok := parts[subRoleKey]
			if !ok {
				return nil, fmt.Errorf("Invalid job parts; missing %q", subRoleKey)
			}
			rvParts = append(rvParts, subRole)
			delete(parts, subRoleKey)
			found := false
			for _, recurseRole := range s.RecurseRoles {
				if recurseRole == subRole {
					found = true
					var err error
					parts, err = process(depth+1, parts)
					if err != nil {
						return nil, err
					}
					break
				}
			}
			if !found {
				return nil, fmt.Errorf("Invalid job parts; unknown sub-role %q", subRole)
			}
		}
		// Optional keys last, included only when present.
		for _, key := range s.OptionalKeys {
			if value, ok := parts[key]; ok {
				rvParts = append(rvParts, value)
				delete(parts, key)
			}
		}
		// Anything left over means the caller supplied keys the schema
		// doesn't know about.
		if len(parts) > 0 {
			return nil, fmt.Errorf("Invalid job parts: too many parts: %v", parts)
		}
		return parts, nil
	}

	// Copy the parts map, so that we can modify at will.
	partsCpy := make(map[string]string, len(parts))
	for k, v := range parts {
		partsCpy[k] = v
	}
	if _, err := process(0, partsCpy); err != nil {
		return "", err
	}
	return strings.Join(rvParts, s.Sep), nil
}
1628