• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1// Copyright 2016 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5package main
6
7/*
8	Generate the tasks.json file.
9*/
10
11import (
12	"encoding/json"
13	"flag"
14	"fmt"
15	"io/ioutil"
16	"os"
17	"path"
18	"path/filepath"
19	"regexp"
20	"runtime"
21	"sort"
22	"strconv"
23	"strings"
24	"time"
25
26	"github.com/golang/glog"
27	"go.skia.org/infra/go/sklog"
28	"go.skia.org/infra/go/util"
29	"go.skia.org/infra/task_scheduler/go/specs"
30)
31
const (
	// Name of the task which bundles the recipes; all Kitchen tasks depend on it.
	BUNDLE_RECIPES_NAME        = "Housekeeper-PerCommit-BundleRecipes"
	// Names of the tasks which isolate CIPD assets (see ISOLATE_ASSET_MAPPING).
	ISOLATE_GCLOUD_LINUX_NAME  = "Housekeeper-PerCommit-IsolateGCloudLinux"
	ISOLATE_GO_DEPS_NAME       = "Housekeeper-PerCommit-IsolateGoDeps"
	ISOLATE_SKIMAGE_NAME       = "Housekeeper-PerCommit-IsolateSkImage"
	ISOLATE_SKP_NAME           = "Housekeeper-PerCommit-IsolateSKP"
	ISOLATE_SVG_NAME           = "Housekeeper-PerCommit-IsolateSVG"
	ISOLATE_NDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidNDKLinux"
	ISOLATE_SDK_LINUX_NAME     = "Housekeeper-PerCommit-IsolateAndroidSDKLinux"
	ISOLATE_WIN_TOOLCHAIN_NAME = "Housekeeper-PerCommit-IsolateWinToolchain"

	// Default Swarming "os" dimension values for each platform.
	DEFAULT_OS_DEBIAN    = "Debian-9.4"
	DEFAULT_OS_LINUX_GCE = DEFAULT_OS_DEBIAN
	DEFAULT_OS_MAC       = "Mac-10.13.6"
	DEFAULT_OS_UBUNTU    = "Ubuntu-14.04"
	DEFAULT_OS_WIN       = "Windows-2016Server-14393"

	// LogDog project used when alternateProject is not set.
	DEFAULT_PROJECT = "skia"

	// GCE machine types used as Swarming dimensions for Linux GCE bots.
	// Small is a 2-core machine.
	// TODO(dogben): Would n1-standard-1 or n1-standard-2 be sufficient?
	MACHINE_TYPE_SMALL = "n1-highmem-2"
	// Medium is a 16-core machine
	MACHINE_TYPE_MEDIUM = "n1-standard-16"
	// Large is a 64-core machine. (We use "highcpu" because we don't need more than 57GB memory for
	// any of our tasks.)
	MACHINE_TYPE_LARGE = "n1-highcpu-64"

	// Swarming output dirs.
	OUTPUT_NONE  = "output_ignored" // This will result in outputs not being isolated.
	OUTPUT_BUILD = "build"
	OUTPUT_TEST  = "test"
	OUTPUT_PERF  = "perf"

	// Name prefix for upload jobs.
	PREFIX_UPLOAD = "Upload"

	// Service accounts that the various task types run as.
	SERVICE_ACCOUNT_BOOKMAKER          = "skia-bookmaker@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_COMPILE            = "skia-external-compile-tasks@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_HOUSEKEEPER        = "skia-external-housekeeper@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_RECREATE_SKPS      = "skia-recreate-skps@skia-swarming-bots.iam.gserviceaccount.com"
	// NOTE(review): UPDATE_GO_DEPS reuses the recreate-skps account — looks
	// deliberate, but confirm it shouldn't have a dedicated account.
	SERVICE_ACCOUNT_UPDATE_GO_DEPS     = "skia-recreate-skps@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPDATE_META_CONFIG = "skia-update-meta-config@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_BINARY      = "skia-external-binary-uploader@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_CALMBENCH   = "skia-external-calmbench-upload@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_GM          = "skia-external-gm-uploader@skia-swarming-bots.iam.gserviceaccount.com"
	SERVICE_ACCOUNT_UPLOAD_NANO        = "skia-external-nano-uploader@skia-swarming-bots.iam.gserviceaccount.com"
)
80
var (
	// "Constants"

	// Top-level list of all jobs to run at each commit; loaded from
	// jobs.json.
	JOBS []string

	// General configuration information.
	CONFIG struct {
		GsBucketGm    string   `json:"gs_bucket_gm"`
		GoldHashesURL string   `json:"gold_hashes_url"`
		GsBucketNano  string   `json:"gs_bucket_nano"`
		GsBucketCalm  string   `json:"gs_bucket_calm"`
		NoUpload      []string `json:"no_upload"`
		Pool          string   `json:"pool"`
	}

	// alternateProject can be set in an init function to override the default project ID.
	alternateProject string

	// alternateServiceAccount can be set in an init function to override the normal service accounts.
	// Takes one of SERVICE_ACCOUNT_* constants as an argument and returns the service account that
	// should be used, or uses sklog.Fatal to indicate a problem.
	alternateServiceAccount func(serviceAccountEnum string) string

	// alternateSwarmDimensions can be set in an init function to override the default swarming bot
	// dimensions for the given task.
	alternateSwarmDimensions func(parts map[string]string) []string

	// internalHardwareLabelFn can be set in an init function to provide an
	// internal_hardware_label variable to the recipe.
	internalHardwareLabelFn func(parts map[string]string) *int

	// Defines the structure of job names.
	jobNameSchema *JobNameSchema

	// Named caches used by tasks.
	// Git checkout caches, shared by tasks that sync code.
	CACHES_GIT = []*specs.Cache{
		&specs.Cache{
			Name: "git",
			Path: "cache/git",
		},
		&specs.Cache{
			Name: "git_cache",
			Path: "cache/git_cache",
		},
	}
	// Cache for Go build artifacts.
	CACHES_GO = []*specs.Cache{
		&specs.Cache{
			Name: "go_cache",
			Path: "cache/go_cache",
		},
	}
	// Shared working-directory cache (skipped for NoDEPS builds; see usesGit).
	CACHES_WORKDIR = []*specs.Cache{
		&specs.Cache{
			Name: "work",
			Path: "cache/work",
		},
	}
	// Cache used by tasks which run Docker (see usesDocker).
	CACHES_DOCKER = []*specs.Cache{
		&specs.Cache{
			Name: "docker",
			Path: "cache/docker",
		},
	}
	// CIPD packages pulled in by the various task types.
	// Versions of the following copied from
	// https://chrome-internal.googlesource.com/infradata/config/+/master/configs/cr-buildbucket/swarming_task_template_canary.json#42
	// to test the fix for chromium:836196.
	// (In the future we may want to use versions from
	// https://chrome-internal.googlesource.com/infradata/config/+/master/configs/cr-buildbucket/swarming_task_template.json#42)
	// TODO(borenet): Roll these versions automatically!
	CIPD_PKGS_PYTHON = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/tools/luci/vpython/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:96f81e737868d43124b4661cf1c325296ca04944",
		},
	}

	// Full CPython interpreter; added for Windows tasks (see kitchenTask).
	CIPD_PKGS_CPYTHON = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/python/cpython/${platform}",
			Path:    "cipd_bin_packages",
			Version: "version:2.7.14.chromium14",
		},
	}

	// Kitchen binary plus auth helper; includes CIPD_PKGS_PYTHON.
	CIPD_PKGS_KITCHEN = append([]*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/tools/luci/kitchen/${platform}",
			Path:    ".",
			Version: "git_revision:d8f38ca9494b5af249942631f9cee45927f6b4bc",
		},
		&specs.CipdPackage{
			Name:    "infra/tools/luci-auth/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:2c805f1c716f6c5ad2126b27ec88b8585a09481e",
		},
	}, CIPD_PKGS_PYTHON...)

	// Git client, wrapper, and credential helper for tasks that use git.
	CIPD_PKGS_GIT = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/git/${platform}",
			Path:    "cipd_bin_packages",
			Version: "version:2.17.1.chromium15",
		},
		&specs.CipdPackage{
			Name:    "infra/tools/git/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:c9c8a52bfeaf8bc00ece22fdfd447822c8fcad77",
		},
		&specs.CipdPackage{
			Name:    "infra/tools/luci/git-credential-luci/${platform}",
			Path:    "cipd_bin_packages",
			Version: "git_revision:2c805f1c716f6c5ad2126b27ec88b8585a09481e",
		},
	}

	// gsutil, for tasks which upload to Google Storage.
	CIPD_PKGS_GSUTIL = []*specs.CipdPackage{
		&specs.CipdPackage{
			Name:    "infra/gsutil",
			Path:    "cipd_bin_packages",
			Version: "version:4.28",
		},
	}

	CIPD_PKGS_XCODE = []*specs.CipdPackage{
		// https://chromium.googlesource.com/chromium/tools/build/+/e19b7d9390e2bb438b566515b141ed2b9ed2c7c2/scripts/slave/recipe_modules/ios/api.py#317
		// This package is really just an installer for XCode.
		&specs.CipdPackage{
			Name: "infra/tools/mac_toolchain/${platform}",
			Path: "mac_toolchain",
			// When this is updated, also update
			// https://skia.googlesource.com/skcms.git/+/f1e2b45d18facbae2dece3aca673fe1603077846/infra/bots/gen_tasks.go#56
			Version: "git_revision:796d2b92cff93fc2059623ce0a66284373ceea0a",
		},
	}

	// Flags.
	builderNameSchemaFile = flag.String("builder_name_schema", "", "Path to the builder_name_schema.json file. If not specified, uses infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json from this repo.")
	assetsDir             = flag.String("assets_dir", "", "Directory containing assets.")
	cfgFile               = flag.String("cfg_file", "", "JSON file containing general configuration information.")
	jobsFile              = flag.String("jobs", "", "JSON file containing jobs to run.")
)
225
226// Build the LogDog annotation URL.
227func logdogAnnotationUrl() string {
228	project := DEFAULT_PROJECT
229	if alternateProject != "" {
230		project = alternateProject
231	}
232	return fmt.Sprintf("logdog://logs.chromium.org/%s/%s/+/annotations", project, specs.PLACEHOLDER_TASK_ID)
233}
234
235// Create a properties JSON string.
236func props(p map[string]string) string {
237	d := make(map[string]interface{}, len(p)+1)
238	for k, v := range p {
239		d[k] = interface{}(v)
240	}
241	d["$kitchen"] = struct {
242		DevShell bool `json:"devshell"`
243		GitAuth  bool `json:"git_auth"`
244	}{
245		DevShell: true,
246		GitAuth:  true,
247	}
248
249	j, err := json.Marshal(d)
250	if err != nil {
251		sklog.Fatal(err)
252	}
253	return strings.Replace(string(j), "\\u003c", "<", -1)
254}
255
// kitchenTask returns a specs.TaskSpec instance which uses Kitchen to run a
// recipe.
//
// name is the task name, recipe the recipe to run, isolate the .isolate file
// (resolved relative to the config via relpath), serviceAccount the account
// to run as (possibly remapped by alternateServiceAccount), dimensions the
// Swarming bot dimensions, extraProps extra recipe properties (may override
// the standard ones), and outputDir the Swarming output dir (OUTPUT_NONE to
// skip isolating outputs).
func kitchenTask(name, recipe, isolate, serviceAccount string, dimensions []string, extraProps map[string]string, outputDir string) *specs.TaskSpec {
	// Allow an init function to remap the service account.
	if serviceAccount != "" && alternateServiceAccount != nil {
		serviceAccount = alternateServiceAccount(serviceAccount)
	}
	cipd := append([]*specs.CipdPackage{}, CIPD_PKGS_KITCHEN...)
	// Windows tasks additionally pull a full CPython from CIPD.
	if strings.Contains(name, "Win") {
		cipd = append(cipd, CIPD_PKGS_CPYTHON...)
	}
	// Standard recipe properties, mostly placeholders filled in by the
	// task scheduler at runtime.
	properties := map[string]string{
		"buildbucket_build_id": specs.PLACEHOLDER_BUILDBUCKET_BUILD_ID,
		"buildername":          name,
		"patch_issue":          specs.PLACEHOLDER_ISSUE,
		"patch_ref":            specs.PLACEHOLDER_PATCH_REF,
		"patch_repo":           specs.PLACEHOLDER_PATCH_REPO,
		"patch_set":            specs.PLACEHOLDER_PATCHSET,
		"patch_storage":        specs.PLACEHOLDER_PATCH_STORAGE,
		"repository":           specs.PLACEHOLDER_REPO,
		"revision":             specs.PLACEHOLDER_REVISION,
		"swarm_out_dir":        outputDir,
	}
	// Caller-supplied properties override the standard set.
	for k, v := range extraProps {
		properties[k] = v
	}
	// OUTPUT_NONE means outputs are not isolated.
	var outputs []string = nil
	if outputDir != OUTPUT_NONE {
		outputs = []string{outputDir}
	}
	task := &specs.TaskSpec{
		Caches: []*specs.Cache{
			&specs.Cache{
				Name: "vpython",
				Path: "cache/vpython",
			},
		},
		CipdPackages: cipd,
		Command: []string{
			"./kitchen${EXECUTABLE_SUFFIX}", "cook",
			"-checkout-dir", "recipe_bundle",
			"-mode", "swarming",
			"-luci-system-account", "system",
			"-cache-dir", "cache",
			"-temp-dir", "tmp",
			"-known-gerrit-host", "android.googlesource.com",
			"-known-gerrit-host", "boringssl.googlesource.com",
			"-known-gerrit-host", "chromium.googlesource.com",
			"-known-gerrit-host", "dart.googlesource.com",
			"-known-gerrit-host", "fuchsia.googlesource.com",
			"-known-gerrit-host", "go.googlesource.com",
			"-known-gerrit-host", "llvm.googlesource.com",
			"-known-gerrit-host", "skia.googlesource.com",
			"-known-gerrit-host", "webrtc.googlesource.com",
			"-output-result-json", "${ISOLATED_OUTDIR}/build_result_filename",
			"-workdir", ".",
			"-recipe", recipe,
			"-properties", props(properties),
			"-logdog-annotation-url", logdogAnnotationUrl(),
		},
		// All Kitchen tasks consume the bundled recipes.
		Dependencies: []string{BUNDLE_RECIPES_NAME},
		Dimensions:   dimensions,
		EnvPrefixes: map[string][]string{
			"PATH": []string{"cipd_bin_packages", "cipd_bin_packages/bin"},
			"VPYTHON_VIRTUALENV_ROOT": []string{"cache/vpython"},
		},
		ExtraTags: map[string]string{
			"log_location": logdogAnnotationUrl(),
		},
		Isolate:        relpath(isolate),
		MaxAttempts:    attempts(name),
		Outputs:        outputs,
		ServiceAccount: serviceAccount,
	}
	// One-hour default timeout; callers may override afterward.
	timeout(task, time.Hour)
	return task
}
332
333// internalHardwareLabel returns the internal ID for the bot, if any.
334func internalHardwareLabel(parts map[string]string) *int {
335	if internalHardwareLabelFn != nil {
336		return internalHardwareLabelFn(parts)
337	}
338	return nil
339}
340
341// linuxGceDimensions are the Swarming dimensions for Linux GCE instances.
342func linuxGceDimensions(machineType string) []string {
343	return []string{
344		// Specify CPU to avoid running builds on bots with a more unique CPU.
345		"cpu:x86-64-Haswell_GCE",
346		"gpu:none",
347		// Currently all Linux GCE tasks run on 16-CPU machines.
348		fmt.Sprintf("machine_type:%s", machineType),
349		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
350		fmt.Sprintf("pool:%s", CONFIG.Pool),
351	}
352}
353
354func dockerGceDimensions() []string {
355	// There's limited parallelism for WASM builds, so we can get away with the medium
356	// instance instead of the beefy large instance.
357	// Docker being installed is the most important part.
358	return append(linuxGceDimensions(MACHINE_TYPE_MEDIUM), "docker_installed:true")
359}
360
// deriveCompileTaskName returns the name of a compile task based on the given
// job name. For Test/Perf/Calmbench jobs it maps the test OS and extra_config
// onto the corresponding Build job name; BuildStats jobs map onto Build jobs
// directly; anything else is returned unchanged.
func deriveCompileTaskName(jobName string, parts map[string]string) string {
	if strings.Contains(jobName, "Bookmaker") {
		// Bookmaker jobs always use the standard Linux GCC Release build.
		return "Build-Debian9-GCC-x86_64-Release"
	} else if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
		task_os := parts["os"]
		ec := []string{}
		if val := parts["extra_config"]; val != "" {
			ec = strings.Split(val, "_")
			// These extra_config pieces only affect testing, not
			// compilation, so drop them from the compile task name.
			ignore := []string{"Skpbench", "AbandonGpuContext", "PreAbandonGpuContext", "Valgrind", "ReleaseAndAbandonGpuContext", "CCPR", "FSAA", "FAAA", "FDAA", "NativeFonts", "GDI", "NoGPUThreads", "ProcDump", "DDL1", "DDL3", "T8888", "DDLTotal", "DDLRecord", "9x9", "BonusConfigs"}
			keep := make([]string, 0, len(ec))
			for _, part := range ec {
				if !util.In(part, ignore) {
					keep = append(keep, part)
				}
			}
			ec = keep
		}
		// Map the test OS onto the OS (and extra_config prefix) of the bot
		// which cross-compiles for it.
		if task_os == "Android" {
			if !util.In("Android", ec) {
				ec = append([]string{"Android"}, ec...)
			}
			task_os = "Debian9"
		} else if task_os == "Chromecast" {
			task_os = "Debian9"
			ec = append([]string{"Chromecast"}, ec...)
		} else if strings.Contains(task_os, "ChromeOS") {
			ec = append([]string{"Chromebook", "GLES"}, ec...)
			task_os = "Debian9"
		} else if task_os == "iOS" {
			ec = append([]string{task_os}, ec...)
			task_os = "Mac"
		} else if strings.Contains(task_os, "Win") {
			task_os = "Win"
		} else if strings.Contains(task_os, "Ubuntu") || strings.Contains(task_os, "Debian") {
			task_os = "Debian9"
		}
		jobNameMap := map[string]string{
			"role":          "Build",
			"os":            task_os,
			"compiler":      parts["compiler"],
			"target_arch":   parts["arch"],
			"configuration": parts["configuration"],
		}
		// PathKit/CanvasKit builds ignore the derived extra_config and use
		// their own fixed values.
		if strings.Contains(jobName, "PathKit") {
			ec = []string{"PathKit"}
		}
		if strings.Contains(jobName, "CanvasKit") {
			if parts["cpu_or_gpu"] == "CPU" {
				ec = []string{"CanvasKit_CPU"}
			} else {
				ec = []string{"CanvasKit"}
			}

		}
		if len(ec) > 0 {
			jobNameMap["extra_config"] = strings.Join(ec, "_")
		}
		name, err := jobNameSchema.MakeJobName(jobNameMap)
		if err != nil {
			glog.Fatal(err)
		}
		return name
	} else if parts["role"] == "BuildStats" {
		return strings.Replace(jobName, "BuildStats", "Build", 1)
	} else {
		return jobName
	}
}
431
432// swarmDimensions generates swarming bot dimensions for the given task.
433func swarmDimensions(parts map[string]string) []string {
434	if alternateSwarmDimensions != nil {
435		return alternateSwarmDimensions(parts)
436	}
437	return defaultSwarmDimensions(parts)
438}
439
440// defaultSwarmDimensions generates default swarming bot dimensions for the given task.
441func defaultSwarmDimensions(parts map[string]string) []string {
442	d := map[string]string{
443		"pool": CONFIG.Pool,
444	}
445	if os, ok := parts["os"]; ok {
446		d["os"], ok = map[string]string{
447			"Android":    "Android",
448			"Chromecast": "Android",
449			"ChromeOS":   "ChromeOS",
450			"Debian9":    DEFAULT_OS_DEBIAN,
451			"Mac":        DEFAULT_OS_MAC,
452			"Ubuntu14":   DEFAULT_OS_UBUNTU,
453			"Ubuntu17":   "Ubuntu-17.04",
454			"Ubuntu18":   "Ubuntu-18.04",
455			"Win":        DEFAULT_OS_WIN,
456			"Win10":      "Windows-10-17763.195",
457			"Win2k8":     "Windows-2008ServerR2-SP1",
458			"Win2016":    DEFAULT_OS_WIN,
459			"Win7":       "Windows-7-SP1",
460			"Win8":       "Windows-8.1-SP0",
461			"iOS":        "iOS-11.4.1",
462		}[os]
463		if !ok {
464			glog.Fatalf("Entry %q not found in OS mapping.", os)
465		}
466		if os == "Win10" && parts["model"] == "Golo" {
467			// ChOps-owned machines have Windows 10 v1709, but a slightly different version than Skolo.
468			d["os"] = "Windows-10-16299.309"
469		}
470		if d["os"] == DEFAULT_OS_WIN {
471			// TODO(dogben): Temporarily add image dimension during upgrade.
472			d["image"] = "windows-server-2016-dc-v20190108"
473		}
474	} else {
475		d["os"] = DEFAULT_OS_DEBIAN
476	}
477	if parts["role"] == "Test" || parts["role"] == "Perf" || parts["role"] == "Calmbench" {
478		if strings.Contains(parts["os"], "Android") || strings.Contains(parts["os"], "Chromecast") {
479			// For Android, the device type is a better dimension
480			// than CPU or GPU.
481			deviceInfo, ok := map[string][]string{
482				"AndroidOne":      {"sprout", "MOB30Q"},
483				"Chorizo":         {"chorizo", "1.30_109591"},
484				"GalaxyS6":        {"zerofltetmo", "NRD90M_G920TUVU5FQK1"},
485				"GalaxyS7_G930FD": {"herolte", "R16NW_G930FXXS2ERH6"}, // This is Oreo.
486				"GalaxyS9":        {"starlte", "R16NW_G960FXXU2BRJ8"}, // This is Oreo.
487				"MotoG4":          {"athene", "NPJS25.93-14.7-8"},
488				"NVIDIA_Shield":   {"foster", "OPR6.170623.010"},
489				"Nexus5":          {"hammerhead", "M4B30Z_3437181"},
490				"Nexus5x":         {"bullhead", "OPR6.170623.023"},
491				"Nexus7":          {"grouper", "LMY47V_1836172"}, // 2012 Nexus 7
492				"NexusPlayer":     {"fugu", "OPR2.170623.027"},
493				"Pixel":           {"sailfish", "PPR1.180610.009"},
494				"Pixel2XL":        {"taimen", "PPR1.180610.009"},
495			}[parts["model"]]
496			if !ok {
497				glog.Fatalf("Entry %q not found in Android mapping.", parts["model"])
498			}
499			d["device_type"] = deviceInfo[0]
500			d["device_os"] = deviceInfo[1]
501		} else if strings.Contains(parts["os"], "iOS") {
502			device, ok := map[string]string{
503				"iPadMini4": "iPad5,1",
504				"iPhone6":   "iPhone7,2",
505				"iPhone7":   "iPhone9,1",
506				"iPadPro":   "iPad6,3",
507			}[parts["model"]]
508			if !ok {
509				glog.Fatalf("Entry %q not found in iOS mapping.", parts["model"])
510			}
511			d["device"] = device
512		} else if strings.Contains(parts["extra_config"], "SwiftShader") {
513			if parts["model"] != "GCE" || d["os"] != DEFAULT_OS_DEBIAN || parts["cpu_or_gpu_value"] != "SwiftShader" {
514				glog.Fatalf("Please update defaultSwarmDimensions for SwiftShader %s %s %s.", parts["os"], parts["model"], parts["cpu_or_gpu_value"])
515			}
516			d["cpu"] = "x86-64-Haswell_GCE"
517			d["os"] = DEFAULT_OS_LINUX_GCE
518			d["machine_type"] = MACHINE_TYPE_SMALL
519		} else if strings.Contains(parts["extra_config"], "SKQP") && parts["cpu_or_gpu_value"] == "Emulator" {
520			if parts["model"] != "NUC7i5BNK" || d["os"] != DEFAULT_OS_DEBIAN {
521				glog.Fatalf("Please update defaultSwarmDimensions for SKQP::Emulator %s %s.", parts["os"], parts["model"])
522			}
523			d["cpu"] = "x86-64-i5-7260U"
524			d["os"] = "Debian-9.4"
525			// KVM means Kernel-based Virtual Machine, that is, can this vm virtualize commands
526			// For us, this means, can we run an x86 android emulator on it.
527			// kjlubick tried running this on GCE, but it was a bit too slow on the large install.
528			// So, we run on bare metal machines in the Skolo (that should also have KVM).
529			d["kvm"] = "1"
530			d["docker_installed"] = "true"
531		} else if parts["cpu_or_gpu"] == "CPU" {
532			modelMapping, ok := map[string]map[string]string{
533				"AVX": {
534					"Golo": "x86-64-E5-2670",
535				},
536				"AVX2": {
537					"GCE":            "x86-64-Haswell_GCE",
538					"MacBookPro11.5": "x86-64-i7-4870HQ",
539					"NUC5i7RYH":      "x86-64-i7-5557U",
540				},
541				"AVX512": {
542					"GCE": "x86-64-Skylake_GCE",
543				},
544			}[parts["cpu_or_gpu_value"]]
545			if !ok {
546				glog.Fatalf("Entry %q not found in CPU mapping.", parts["cpu_or_gpu_value"])
547			}
548			cpu, ok := modelMapping[parts["model"]]
549			if !ok {
550				glog.Fatalf("Entry %q not found in %q model mapping.", parts["model"], parts["cpu_or_gpu_value"])
551			}
552			d["cpu"] = cpu
553			if parts["model"] == "GCE" && d["os"] == DEFAULT_OS_DEBIAN {
554				d["os"] = DEFAULT_OS_LINUX_GCE
555			}
556			if parts["model"] == "GCE" && d["cpu"] == "x86-64-Haswell_GCE" {
557				d["machine_type"] = MACHINE_TYPE_MEDIUM
558			}
559		} else {
560			if strings.Contains(parts["extra_config"], "CanvasKit") {
561				// GPU is defined for the WebGL version of CanvasKit, but
562				// it can still run on a GCE instance.
563				return dockerGceDimensions()
564			} else if strings.Contains(parts["os"], "Win") {
565				gpu, ok := map[string]string{
566					"GT610":         "10de:104a-23.21.13.9101",
567					"GTX660":        "10de:11c0-25.21.14.1634",
568					"GTX960":        "10de:1401-25.21.14.1634",
569					"IntelHD4400":   "8086:0a16-20.19.15.4963",
570					"IntelIris540":  "8086:1926-25.20.100.6444",
571					"IntelIris6100": "8086:162b-20.19.15.4963",
572					"IntelIris655":  "8086:3ea5-25.20.100.6444",
573					"RadeonHD7770":  "1002:683d-24.20.13001.1010",
574					"RadeonR9M470X": "1002:6646-24.20.13001.1010",
575					"QuadroP400":    "10de:1cb3-25.21.14.1678",
576				}[parts["cpu_or_gpu_value"]]
577				if !ok {
578					glog.Fatalf("Entry %q not found in Win GPU mapping.", parts["cpu_or_gpu_value"])
579				}
580				d["gpu"] = gpu
581			} else if strings.Contains(parts["os"], "Ubuntu") || strings.Contains(parts["os"], "Debian") {
582				gpu, ok := map[string]string{
583					// Intel drivers come from CIPD, so no need to specify the version here.
584					"IntelBayTrail": "8086:0f31",
585					"IntelHD2000":   "8086:0102",
586					"IntelHD405":    "8086:22b1",
587					"IntelIris640":  "8086:5926",
588					"QuadroP400":    "10de:1cb3-384.59",
589				}[parts["cpu_or_gpu_value"]]
590				if !ok {
591					glog.Fatalf("Entry %q not found in Ubuntu GPU mapping.", parts["cpu_or_gpu_value"])
592				}
593				if parts["os"] == "Ubuntu18" && parts["cpu_or_gpu_value"] == "QuadroP400" {
594					// Ubuntu18 has a slightly newer GPU driver.
595					gpu = "10de:1cb3-390.87"
596				}
597				d["gpu"] = gpu
598			} else if strings.Contains(parts["os"], "Mac") {
599				gpu, ok := map[string]string{
600					"IntelHD6000":   "8086:1626",
601					"IntelHD615":    "8086:591e",
602					"IntelIris5100": "8086:0a2e",
603					"RadeonHD8870M": "1002:6821-4.0.20-3.2.8",
604				}[parts["cpu_or_gpu_value"]]
605				if !ok {
606					glog.Fatalf("Entry %q not found in Mac GPU mapping.", parts["cpu_or_gpu_value"])
607				}
608				d["gpu"] = gpu
609				// Yuck. We have two different types of MacMini7,1 with the same GPU but different CPUs.
610				if parts["cpu_or_gpu_value"] == "IntelIris5100" {
611					// Run all tasks on Golo machines for now.
612					d["cpu"] = "x86-64-i7-4578U"
613				}
614			} else if strings.Contains(parts["os"], "ChromeOS") {
615				version, ok := map[string]string{
616					"MaliT604":           "10575.22.0",
617					"MaliT764":           "10575.22.0",
618					"MaliT860":           "10575.22.0",
619					"PowerVRGX6250":      "10575.22.0",
620					"TegraK1":            "10575.22.0",
621					"IntelHDGraphics615": "10575.22.0",
622				}[parts["cpu_or_gpu_value"]]
623				if !ok {
624					glog.Fatalf("Entry %q not found in ChromeOS GPU mapping.", parts["cpu_or_gpu_value"])
625				}
626				d["gpu"] = parts["cpu_or_gpu_value"]
627				d["release_version"] = version
628			} else {
629				glog.Fatalf("Unknown GPU mapping for OS %q.", parts["os"])
630			}
631		}
632	} else {
633		d["gpu"] = "none"
634		if d["os"] == DEFAULT_OS_DEBIAN {
635			if strings.Contains(parts["extra_config"], "PathKit") || strings.Contains(parts["extra_config"], "CanvasKit") || strings.Contains(parts["extra_config"], "CMake") {
636				return dockerGceDimensions()
637			}
638			if parts["role"] == "BuildStats" {
639				// Doesn't require a lot of resources
640				return linuxGceDimensions(MACHINE_TYPE_MEDIUM)
641			}
642			// Use many-core machines for Build tasks.
643			return linuxGceDimensions(MACHINE_TYPE_LARGE)
644		} else if d["os"] == DEFAULT_OS_WIN {
645			// Windows CPU bots.
646			d["cpu"] = "x86-64-Haswell_GCE"
647			// Use many-core machines for Build tasks.
648			d["machine_type"] = MACHINE_TYPE_LARGE
649		} else if d["os"] == DEFAULT_OS_MAC {
650			// Mac CPU bots.
651			d["cpu"] = "x86-64-E5-2697_v2"
652		}
653	}
654
655	rv := make([]string, 0, len(d))
656	for k, v := range d {
657		rv = append(rv, fmt.Sprintf("%s:%s", k, v))
658	}
659	sort.Strings(rv)
660	return rv
661}
662
663// relpath returns the relative path to the given file from the config file.
664func relpath(f string) string {
665	_, filename, _, _ := runtime.Caller(0)
666	dir := path.Dir(filename)
667	rel := dir
668	if *cfgFile != "" {
669		rel = path.Dir(*cfgFile)
670	}
671	rv, err := filepath.Rel(rel, path.Join(dir, f))
672	if err != nil {
673		sklog.Fatal(err)
674	}
675	return rv
676}
677
678// bundleRecipes generates the task to bundle and isolate the recipes.
679func bundleRecipes(b *specs.TasksCfgBuilder) string {
680	pkgs := append([]*specs.CipdPackage{}, CIPD_PKGS_GIT...)
681	pkgs = append(pkgs, CIPD_PKGS_PYTHON...)
682	b.MustAddTask(BUNDLE_RECIPES_NAME, &specs.TaskSpec{
683		CipdPackages: pkgs,
684		Command: []string{
685			"/bin/bash", "skia/infra/bots/bundle_recipes.sh", specs.PLACEHOLDER_ISOLATED_OUTDIR,
686		},
687		Dimensions: linuxGceDimensions(MACHINE_TYPE_SMALL),
688		EnvPrefixes: map[string][]string{
689			"PATH": []string{"cipd_bin_packages", "cipd_bin_packages/bin"},
690		},
691		Isolate: relpath("swarm_recipe.isolate"),
692	})
693	return BUNDLE_RECIPES_NAME
694}
695
// isolateAssetCfg describes how to isolate a single CIPD asset.
type isolateAssetCfg struct {
	// cipdPkg is the CIPD asset name passed to MustGetCipdPackageFromAsset.
	cipdPkg string
	// path is the directory copied into ${ISOLATED_OUTDIR}.
	path    string
}
700
// ISOLATE_ASSET_MAPPING maps each Isolate-* task name to the CIPD asset it
// isolates and the path where the asset is checked out.
var ISOLATE_ASSET_MAPPING = map[string]isolateAssetCfg{
	ISOLATE_GCLOUD_LINUX_NAME: {
		cipdPkg: "gcloud_linux",
		path:    "gcloud_linux",
	},
	ISOLATE_GO_DEPS_NAME: {
		cipdPkg: "go_deps",
		path:    "go_deps",
	},
	ISOLATE_SKIMAGE_NAME: {
		cipdPkg: "skimage",
		path:    "skimage",
	},
	ISOLATE_SKP_NAME: {
		cipdPkg: "skp",
		path:    "skp",
	},
	ISOLATE_SVG_NAME: {
		cipdPkg: "svg",
		path:    "svg",
	},
	ISOLATE_NDK_LINUX_NAME: {
		cipdPkg: "android_ndk_linux",
		path:    "android_ndk_linux",
	},
	ISOLATE_SDK_LINUX_NAME: {
		cipdPkg: "android_sdk_linux",
		path:    "android_sdk_linux",
	},
	ISOLATE_WIN_TOOLCHAIN_NAME: {
		cipdPkg: "win_toolchain",
		path:    "win_toolchain",
	},
}
735
736// isolateCIPDAsset generates a task to isolate the given CIPD asset.
737func isolateCIPDAsset(b *specs.TasksCfgBuilder, name string) string {
738	asset := ISOLATE_ASSET_MAPPING[name]
739	b.MustAddTask(name, &specs.TaskSpec{
740		CipdPackages: []*specs.CipdPackage{
741			b.MustGetCipdPackageFromAsset(asset.cipdPkg),
742		},
743		Command:    []string{"/bin/cp", "-rL", asset.path, "${ISOLATED_OUTDIR}"},
744		Dimensions: linuxGceDimensions(MACHINE_TYPE_SMALL),
745		Isolate:    relpath("empty.isolate"),
746	})
747	return name
748}
749
750// getIsolatedCIPDDeps returns the slice of Isolate_* tasks a given task needs.
751// This allows us to  save time on I/O bound bots, like the RPIs.
752func getIsolatedCIPDDeps(parts map[string]string) []string {
753	deps := []string{}
754	// Only do this on the RPIs for now. Other, faster machines shouldn't see much
755	// benefit and we don't need the extra complexity, for now
756	rpiOS := []string{"Android", "ChromeOS", "iOS"}
757
758	if o := parts["os"]; strings.Contains(o, "Chromecast") {
759		// Chromecasts don't have enough disk space to fit all of the content,
760		// so we do a subset of the skps.
761		deps = append(deps, ISOLATE_SKP_NAME)
762	} else if e := parts["extra_config"]; strings.Contains(e, "Skpbench") {
763		// Skpbench only needs skps
764		deps = append(deps, ISOLATE_SKP_NAME)
765	} else if util.In(o, rpiOS) {
766		deps = append(deps, ISOLATE_SKP_NAME)
767		deps = append(deps, ISOLATE_SVG_NAME)
768		deps = append(deps, ISOLATE_SKIMAGE_NAME)
769	}
770
771	return deps
772}
773
774// usesGit adds attributes to tasks which use git.
775func usesGit(t *specs.TaskSpec, name string) {
776	t.Caches = append(t.Caches, CACHES_GIT...)
777	if !strings.Contains(name, "NoDEPS") {
778		t.Caches = append(t.Caches, CACHES_WORKDIR...)
779	}
780	t.CipdPackages = append(t.CipdPackages, CIPD_PKGS_GIT...)
781}
782
783// usesGo adds attributes to tasks which use go. Recipes should use
784// "with api.context(env=api.infra.go_env)".
785// (Not needed for tasks that just want to run Go code from the infra repo -- instead use go_deps.)
786func usesGo(b *specs.TasksCfgBuilder, t *specs.TaskSpec) {
787	t.Caches = append(t.Caches, CACHES_GO...)
788	t.CipdPackages = append(t.CipdPackages, b.MustGetCipdPackageFromAsset("go"))
789	t.Dependencies = append(t.Dependencies, isolateCIPDAsset(b, ISOLATE_GO_DEPS_NAME))
790}
791
792// usesDocker adds attributes to tasks which use docker.
793func usesDocker(t *specs.TaskSpec, name string) {
794	if strings.Contains(name, "EMCC") || strings.Contains(name, "SKQP") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "CMake") {
795		t.Caches = append(t.Caches, CACHES_DOCKER...)
796	}
797}
798
799// timeout sets the timeout(s) for this task.
800func timeout(task *specs.TaskSpec, timeout time.Duration) {
801	task.ExecutionTimeout = timeout
802	task.IoTimeout = timeout // With kitchen, step logs don't count toward IoTimeout.
803}
804
805// attempts returns the desired MaxAttempts for this task.
806func attempts(name string) int {
807	if strings.Contains(name, "Android_Framework") {
808		// The reason for this has been lost to time.
809		return 1
810	}
811	if !(strings.HasPrefix(name, "Build-") || strings.HasPrefix(name, "Upload-")) {
812		for _, extraConfig := range []string{"ASAN", "MSAN", "TSAN", "UBSAN", "Valgrind"} {
813			if strings.Contains(name, extraConfig) {
814				// Sanitizers often find non-deterministic issues that retries would hide.
815				return 1
816			}
817		}
818	}
819	// Retry by default to hide random bot/hardware failures.
820	return 2
821}
822
// compile generates a compile task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
//
// The task's toolchain CIPD packages and isolated-asset dependencies are
// selected by matching substrings of the task name and entries of parts.
func compile(b *specs.TasksCfgBuilder, name string, parts map[string]string) string {
	task := kitchenTask(name, "compile", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, swarmDimensions(parts), nil, OUTPUT_BUILD)
	usesGit(task, name)
	usesDocker(task, name)

	// Android bots require a toolchain.
	if strings.Contains(name, "Android") {
		if parts["extra_config"] == "Android_Framework" {
			// Do not need a toolchain when building the
			// Android Framework.
		} else if strings.Contains(name, "Mac") {
			// Mac hosts cross-compile with the darwin NDK.
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("android_ndk_darwin"))
		} else if strings.Contains(name, "Win") {
			pkg := b.MustGetCipdPackageFromAsset("android_ndk_windows")
			// Install into a one-character directory; presumably to stay
			// under Windows path-length limits -- TODO confirm.
			pkg.Path = "n"
			task.CipdPackages = append(task.CipdPackages, pkg)
		} else if !strings.Contains(name, "SKQP") {
			// The Linux NDK is provided via an isolated CIPD asset task.
			task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_NDK_LINUX_NAME))
		}
	} else if strings.Contains(name, "Chromecast") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("cast_toolchain"))
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
	} else if strings.Contains(name, "Chromebook") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_linux"))
		if parts["target_arch"] == "x86_64" {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_x86_64_gles"))
		} else if parts["target_arch"] == "arm" {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("armhf_sysroot"))
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("chromebook_arm_gles"))
		}
	} else if strings.Contains(name, "Debian") {
		if strings.Contains(name, "Clang") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if parts["target_arch"] == "mips64el" || parts["target_arch"] == "loongson3a" {
			// Only a GCC toolchain exists for these architectures.
			if parts["compiler"] != "GCC" {
				glog.Fatalf("mips64el toolchain is GCC, but compiler is %q in %q", parts["compiler"], name)
			}
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("mips64el_toolchain_linux"))
		}
		if strings.Contains(name, "SwiftShader") {
			// SwiftShader builds need CMake; presumably its build system -- TODO confirm.
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("cmake_linux"))
		}
		if strings.Contains(name, "OpenCL") {
			task.CipdPackages = append(task.CipdPackages,
				b.MustGetCipdPackageFromAsset("opencl_headers"),
				b.MustGetCipdPackageFromAsset("opencl_ocl_icd_linux"),
			)
		}
	} else if strings.Contains(name, "Win") {
		task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_WIN_TOOLCHAIN_NAME))
		if strings.Contains(name, "Clang") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("clang_win"))
		}
		if strings.Contains(name, "OpenCL") {
			task.CipdPackages = append(task.CipdPackages,
				b.MustGetCipdPackageFromAsset("opencl_headers"),
			)
		}
	} else if strings.Contains(name, "Mac") {
		task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_XCODE...)
		// Keep the Xcode install cached between runs.
		task.Caches = append(task.Caches, &specs.Cache{
			Name: "xcode",
			Path: "cache/Xcode.app",
		})
		if strings.Contains(name, "CommandBuffer") {
			// CommandBuffer builds get double the default compile timeout.
			timeout(task, 2*time.Hour)
		}
		if strings.Contains(name, "MoltenVK") {
			task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("moltenvk"))
		}
	}

	// Add the task.
	b.MustAddTask(name, task)

	// All compile tasks are runnable as their own Job. Assert that the Job
	// is listed in JOBS.
	if !util.In(name, JOBS) {
		glog.Fatalf("Job %q is missing from the JOBS list!", name)
	}

	// Upload the skiaserve binary only for Linux Android compile bots.
	// See skbug.com/7399 for context.
	if parts["configuration"] == "Release" &&
		parts["extra_config"] == "Android" &&
		!strings.Contains(parts["os"], "Win") &&
		!strings.Contains(parts["os"], "Mac") {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		task := kitchenTask(uploadName, "upload_skiaserve", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_BINARY, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
		task.Dependencies = append(task.Dependencies, name)
		b.MustAddTask(uploadName, task)
		return uploadName
	}

	return name
}
922
923// recreateSKPs generates a RecreateSKPs task. Returns the name of the last
924// task in the generated chain of tasks, which the Job should add as a
925// dependency.
926func recreateSKPs(b *specs.TasksCfgBuilder, name string) string {
927	dims := []string{
928		"pool:SkiaCT",
929		fmt.Sprintf("os:%s", DEFAULT_OS_LINUX_GCE),
930	}
931	task := kitchenTask(name, "recreate_skps", "swarm_recipe.isolate", SERVICE_ACCOUNT_RECREATE_SKPS, dims, nil, OUTPUT_NONE)
932	task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GIT...)
933	usesGo(b, task)
934	timeout(task, 4*time.Hour)
935	b.MustAddTask(name, task)
936	return name
937}
938
939// updateGoDEPS generates an UpdateGoDEPS task. Returns the name of the last
940// task in the generated chain of tasks, which the Job should add as a
941// dependency.
942func updateGoDEPS(b *specs.TasksCfgBuilder, name string) string {
943	dims := linuxGceDimensions(MACHINE_TYPE_LARGE)
944	task := kitchenTask(name, "update_go_deps", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPDATE_GO_DEPS, dims, nil, OUTPUT_NONE)
945	task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GIT...)
946	usesGo(b, task)
947	b.MustAddTask(name, task)
948	return name
949}
950
951// checkGeneratedFiles verifies that no generated SKSL files have been edited
952// by hand.
953func checkGeneratedFiles(b *specs.TasksCfgBuilder, name string) string {
954	task := kitchenTask(name, "check_generated_files", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_LARGE), nil, OUTPUT_NONE)
955	task.Caches = append(task.Caches, CACHES_WORKDIR...)
956	b.MustAddTask(name, task)
957	return name
958}
959
960// housekeeper generates a Housekeeper task. Returns the name of the last task
961// in the generated chain of tasks, which the Job should add as a dependency.
962func housekeeper(b *specs.TasksCfgBuilder, name string) string {
963	task := kitchenTask(name, "housekeeper", "swarm_recipe.isolate", SERVICE_ACCOUNT_HOUSEKEEPER, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
964	usesGit(task, name)
965	b.MustAddTask(name, task)
966	return name
967}
968
969// bookmaker generates a Bookmaker task. Returns the name of the last task
970// in the generated chain of tasks, which the Job should add as a dependency.
971func bookmaker(b *specs.TasksCfgBuilder, name, compileTaskName string) string {
972	task := kitchenTask(name, "bookmaker", "swarm_recipe.isolate", SERVICE_ACCOUNT_BOOKMAKER, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
973	task.Caches = append(task.Caches, CACHES_WORKDIR...)
974	task.CipdPackages = append(task.CipdPackages, CIPD_PKGS_GIT...)
975	task.Dependencies = append(task.Dependencies, compileTaskName, isolateCIPDAsset(b, ISOLATE_GO_DEPS_NAME))
976	timeout(task, 2*time.Hour)
977	b.MustAddTask(name, task)
978	return name
979}
980
981// androidFrameworkCompile generates an Android Framework Compile task. Returns
982// the name of the last task in the generated chain of tasks, which the Job
983// should add as a dependency.
984func androidFrameworkCompile(b *specs.TasksCfgBuilder, name string) string {
985	task := kitchenTask(name, "android_compile", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
986	timeout(task, time.Hour)
987	b.MustAddTask(name, task)
988	return name
989}
990
991// infra generates an infra_tests task. Returns the name of the last task in the
992// generated chain of tasks, which the Job should add as a dependency.
993func infra(b *specs.TasksCfgBuilder, name string) string {
994	task := kitchenTask(name, "infra", "swarm_recipe.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_SMALL), nil, OUTPUT_NONE)
995	usesGit(task, name)
996	usesGo(b, task)
997	b.MustAddTask(name, task)
998	return name
999}
1000
1001func buildstats(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string) string {
1002	task := kitchenTask(name, "compute_buildstats", "swarm_recipe.isolate", "", swarmDimensions(parts), nil, OUTPUT_PERF)
1003	task.Dependencies = append(task.Dependencies, compileTaskName)
1004	task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("bloaty"))
1005	b.MustAddTask(name, task)
1006
1007	// Upload release results (for tracking in perf)
1008	// We have some jobs that are FYI (e.g. Debug-CanvasKit)
1009	if strings.Contains(name, "Release") {
1010		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
1011		extraProps := map[string]string{
1012			"gs_bucket": CONFIG.GsBucketNano,
1013		}
1014		uploadTask := kitchenTask(name, "upload_buildstats_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_NANO, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
1015		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
1016		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
1017		b.MustAddTask(uploadName, uploadTask)
1018		return uploadName
1019	}
1020
1021	return name
1022}
1023
// getParentRevisionName returns the name of the parent-revision variant of the
// given compile task: "-ParentRevision" is appended when there is no
// extra_config (so the suffix becomes the extra_config), "_ParentRevision"
// otherwise (extending the existing extra_config).
func getParentRevisionName(compileTaskName string, parts map[string]string) string {
	sep := "_"
	if parts["extra_config"] == "" {
		sep = "-"
	}
	return compileTaskName + sep + "ParentRevision"
}
1031
// calmbench generates a calmbench task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
//
// compileTaskName and compileParentName are the compile tasks for the current
// and parent revisions; both are added as dependencies.
func calmbench(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName, compileParentName string) string {
	task := kitchenTask(name, "calmbench", "calmbench.isolate", "", swarmDimensions(parts), nil, OUTPUT_PERF)
	usesGit(task, name)
	// NOTE(review): the SKP/SVG dependencies reference the isolate task
	// names directly rather than going through isolateCIPDAsset(); those
	// tasks are assumed to be created elsewhere (e.g. via
	// ISOLATE_ASSET_MAPPING in process()) -- confirm.
	task.Dependencies = append(task.Dependencies, compileTaskName, compileParentName, ISOLATE_SKP_NAME, ISOLATE_SVG_NAME)
	if parts["cpu_or_gpu_value"] == "QuadroP400" {
		// Specify "rack" dimension for consistent test results.
		// See https://bugs.chromium.org/p/chromium/issues/detail?id=784662&desc=2#c34
		// for more context.
		if parts["os"] == "Ubuntu18" {
			task.Dimensions = append(task.Dimensions, "rack:2")
		} else {
			task.Dimensions = append(task.Dimensions, "rack:1")
		}
	}
	b.MustAddTask(name, task)

	// Upload results if necessary.
	if strings.Contains(name, "Release") && doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": CONFIG.GsBucketCalm,
		}
		uploadTask := kitchenTask(name, "upload_calmbench_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_CALMBENCH, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		return uploadName
	}

	return name
}
1065
1066// doUpload indicates whether the given Job should upload its results.
1067func doUpload(name string) bool {
1068	for _, s := range CONFIG.NoUpload {
1069		m, err := regexp.MatchString(s, name)
1070		if err != nil {
1071			glog.Fatal(err)
1072		}
1073		if m {
1074			return false
1075		}
1076	}
1077	return true
1078}
1079
// test generates a Test task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func test(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Select the recipe by matching substrings of the task name.
	recipe := "test"
	if strings.Contains(name, "SKQP") {
		recipe = "skqp_test"
		if strings.Contains(name, "Emulator") {
			recipe = "test_skqp_emulator"
		}
	} else if strings.Contains(name, "OpenCL") {
		// TODO(dogben): Longer term we may not want this to be called a "Test" task, but until we start
		// running hs_bench or kx, it will be easier to fit into the current job name schema.
		recipe = "compute_test"
	} else if strings.Contains(name, "PathKit") {
		recipe = "test_pathkit"
	} else if strings.Contains(name, "CanvasKit") {
		recipe = "test_canvaskit"
	} else if strings.Contains(name, "LottieWeb") {
		recipe = "test_lottie_web"
	}
	extraProps := map[string]string{
		"gold_hashes_url": CONFIG.GoldHashesURL,
	}
	iid := internalHardwareLabel(parts)
	if iid != nil {
		extraProps["internal_hardware_label"] = strconv.Itoa(*iid)
	}
	// These task types use the plain recipe isolate instead of the bundled
	// test isolate.
	isolate := "test_skia_bundled.isolate"
	if strings.Contains(name, "CanvasKit") || strings.Contains(name, "Emulator") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "PathKit") {
		isolate = "swarm_recipe.isolate"
	}
	task := kitchenTask(name, recipe, isolate, "", swarmDimensions(parts), extraProps, OUTPUT_TEST)
	task.CipdPackages = append(task.CipdPackages, pkgs...)
	if strings.Contains(name, "Lottie") {
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("lottie-samples"))
	}
	if !strings.Contains(name, "LottieWeb") {
		// Test.+LottieWeb doesn't require anything in Skia to be compiled.
		task.Dependencies = append(task.Dependencies, compileTaskName)
	}

	if strings.Contains(name, "Android_ASAN") {
		task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_NDK_LINUX_NAME))
	}
	if strings.Contains(name, "SKQP") {
		if !strings.Contains(name, "Emulator") {
			task.Dependencies = append(task.Dependencies, isolateCIPDAsset(b, ISOLATE_GCLOUD_LINUX_NAME))
		}
	}
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		task.Dependencies = append(task.Dependencies, deps...)
	}
	// Allow the task to sit pending for up to 20 hours before expiring.
	task.Expiration = 20 * time.Hour

	timeout(task, 4*time.Hour)
	if strings.Contains(parts["extra_config"], "Valgrind") {
		// Valgrind runs get much longer timeouts and are pinned to bots
		// carrying the "valgrind:1" dimension.
		timeout(task, 9*time.Hour)
		task.Expiration = 48 * time.Hour
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		task.Dimensions = append(task.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		timeout(task, 9*time.Hour)
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		timeout(task, 6*time.Hour)
	}
	b.MustAddTask(name, task)

	// Upload results if necessary. TODO(kjlubick): If we do coverage analysis at the same
	// time as normal tests (which would be nice), cfg.json needs to have Coverage removed.
	if doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": CONFIG.GsBucketGm,
		}
		uploadTask := kitchenTask(name, "upload_dm_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_GM, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		return uploadName
	}

	return name
}
1164
// perf generates a Perf task. Returns the name of the last task in the
// generated chain of tasks, which the Job should add as a dependency.
func perf(b *specs.TasksCfgBuilder, name string, parts map[string]string, compileTaskName string, pkgs []*specs.CipdPackage) string {
	// Select recipe and isolate by matching substrings of the name/parts.
	// NOTE(review): unlike test(), the isolate paths here go through
	// relpath() -- confirm both forms are accepted by kitchenTask.
	recipe := "perf"
	isolate := relpath("perf_skia_bundled.isolate")
	if strings.Contains(parts["extra_config"], "Skpbench") {
		recipe = "skpbench"
		isolate = relpath("skpbench_skia_bundled.isolate")
	} else if strings.Contains(name, "PathKit") {
		recipe = "perf_pathkit"
	} else if strings.Contains(name, "CanvasKit") {
		recipe = "perf_canvaskit"
	}
	task := kitchenTask(name, recipe, isolate, "", swarmDimensions(parts), nil, OUTPUT_PERF)
	task.CipdPackages = append(task.CipdPackages, pkgs...)
	task.Dependencies = append(task.Dependencies, compileTaskName)
	// Allow the task to sit pending for up to 20 hours before expiring.
	task.Expiration = 20 * time.Hour
	timeout(task, 4*time.Hour)
	if deps := getIsolatedCIPDDeps(parts); len(deps) > 0 {
		task.Dependencies = append(task.Dependencies, deps...)
	}

	if strings.Contains(parts["extra_config"], "Valgrind") {
		// Valgrind runs get much longer timeouts and are pinned to bots
		// carrying the "valgrind:1" dimension.
		timeout(task, 9*time.Hour)
		task.Expiration = 48 * time.Hour
		task.CipdPackages = append(task.CipdPackages, b.MustGetCipdPackageFromAsset("valgrind"))
		task.Dimensions = append(task.Dimensions, "valgrind:1")
	} else if strings.Contains(parts["extra_config"], "MSAN") {
		timeout(task, 9*time.Hour)
	} else if parts["arch"] == "x86" && parts["configuration"] == "Debug" {
		// skia:6737
		timeout(task, 6*time.Hour)
	}
	iid := internalHardwareLabel(parts)
	if iid != nil {
		// Unlike test(), the label is passed on the command line rather
		// than via recipe properties.
		task.Command = append(task.Command, fmt.Sprintf("internal_hardware_label=%d", *iid))
	}
	if parts["cpu_or_gpu_value"] == "QuadroP400" {
		// Specify "rack" dimension for consistent test results.
		// See https://bugs.chromium.org/p/chromium/issues/detail?id=784662&desc=2#c34
		// for more context.
		if parts["os"] == "Ubuntu18" {
			task.Dimensions = append(task.Dimensions, "rack:2")
		} else {
			task.Dimensions = append(task.Dimensions, "rack:1")
		}
	}
	b.MustAddTask(name, task)

	// Upload results if necessary.
	if strings.Contains(name, "Release") && doUpload(name) {
		uploadName := fmt.Sprintf("%s%s%s", PREFIX_UPLOAD, jobNameSchema.Sep, name)
		extraProps := map[string]string{
			"gs_bucket": CONFIG.GsBucketNano,
		}
		uploadTask := kitchenTask(name, "upload_nano_results", "swarm_recipe.isolate", SERVICE_ACCOUNT_UPLOAD_NANO, linuxGceDimensions(MACHINE_TYPE_SMALL), extraProps, OUTPUT_NONE)
		uploadTask.CipdPackages = append(uploadTask.CipdPackages, CIPD_PKGS_GSUTIL...)
		uploadTask.Dependencies = append(uploadTask.Dependencies, name)
		b.MustAddTask(uploadName, uploadTask)
		return uploadName
	}
	return name
}
1228
1229// Run the presubmit.
1230func presubmit(b *specs.TasksCfgBuilder, name string) string {
1231	extraProps := map[string]string{
1232		"category":         "cq",
1233		"patch_gerrit_url": "https://skia-review.googlesource.com",
1234		"patch_project":    "skia",
1235		"patch_ref":        specs.PLACEHOLDER_PATCH_REF,
1236		"reason":           "CQ",
1237		"repo_name":        "skia",
1238	}
1239	// Use MACHINE_TYPE_LARGE because it seems to save time versus MEDIUM and we want presubmit to be
1240	// fast.
1241	task := kitchenTask(name, "run_presubmit", "empty.isolate", SERVICE_ACCOUNT_COMPILE, linuxGceDimensions(MACHINE_TYPE_LARGE), extraProps, OUTPUT_NONE)
1242
1243	replaceArg := func(key, value string) {
1244		found := false
1245		for idx, arg := range task.Command {
1246			if arg == key {
1247				task.Command[idx+1] = value
1248				found = true
1249			}
1250		}
1251		if !found {
1252			task.Command = append(task.Command, key, value)
1253		}
1254	}
1255	replaceArg("-repository", "https://chromium.googlesource.com/chromium/tools/build")
1256	replaceArg("-revision", "HEAD")
1257	usesGit(task, name)
1258	task.Dependencies = []string{} // No bundled recipes for this one.
1259	b.MustAddTask(name, task)
1260	return name
1261}
1262
// process generates tasks and jobs for the given job name.
//
// Each helper (compile, test, perf, ...) adds its tasks to b and returns the
// name of the final task in its chain, which is collected into deps and
// becomes the Job's TaskSpecs.
func process(b *specs.TasksCfgBuilder, name string) {
	var priority float64 // Leave as default for most jobs.
	deps := []string{}

	// Bundle Recipes.
	if name == BUNDLE_RECIPES_NAME {
		deps = append(deps, bundleRecipes(b))
	}

	// Isolate CIPD assets.
	if _, ok := ISOLATE_ASSET_MAPPING[name]; ok {
		deps = append(deps, isolateCIPDAsset(b, name))
	}

	parts, err := jobNameSchema.ParseJobName(name)
	if err != nil {
		glog.Fatal(err)
	}

	// RecreateSKPs.
	if strings.Contains(name, "RecreateSKPs") {
		deps = append(deps, recreateSKPs(b, name))
	}

	// Update Go DEPS.
	if strings.Contains(name, "UpdateGoDEPS") {
		deps = append(deps, updateGoDEPS(b, name))
	}

	// Infra tests.
	if name == "Housekeeper-PerCommit-InfraTests" {
		deps = append(deps, infra(b, name))
	}

	// Compile bots.
	if parts["role"] == "Build" {
		if parts["extra_config"] == "Android_Framework" {
			// Android Framework compile tasks use a different recipe.
			deps = append(deps, androidFrameworkCompile(b, name))
		} else {
			deps = append(deps, compile(b, name, parts))
		}
	}

	// Most remaining bots need a compile task.
	compileTaskName := deriveCompileTaskName(name, parts)
	compileTaskParts, err := jobNameSchema.ParseJobName(compileTaskName)
	if err != nil {
		glog.Fatal(err)
	}
	// Calmbench bots additionally need a build of the parent revision.
	compileParentName := getParentRevisionName(compileTaskName, compileTaskParts)
	compileParentParts, err := jobNameSchema.ParseJobName(compileParentName)
	if err != nil {
		glog.Fatal(err)
	}

	// These bots do not need a compile task.
	if parts["role"] != "Build" &&
		name != "Housekeeper-Nightly-UpdateGoDEPS" &&
		name != "Housekeeper-PerCommit-BundleRecipes" &&
		name != "Housekeeper-PerCommit-InfraTests" &&
		name != "Housekeeper-PerCommit-CheckGeneratedFiles" &&
		name != "Housekeeper-OnDemand-Presubmit" &&
		name != "Housekeeper-PerCommit" &&
		!strings.Contains(name, "Android_Framework") &&
		!strings.Contains(name, "RecreateSKPs") &&
		!strings.Contains(name, "Housekeeper-PerCommit-Isolate") &&
		!strings.Contains(name, "LottieWeb") {
		compile(b, compileTaskName, compileTaskParts)
		if parts["role"] == "Calmbench" {
			compile(b, compileParentName, compileParentParts)
		}
	}

	// Housekeepers.
	if name == "Housekeeper-PerCommit" {
		deps = append(deps, housekeeper(b, name))
	}
	if name == "Housekeeper-PerCommit-CheckGeneratedFiles" {
		deps = append(deps, checkGeneratedFiles(b, name))
	}
	if name == "Housekeeper-OnDemand-Presubmit" {
		// Presubmit is the only job with a non-default priority.
		priority = 1
		deps = append(deps, presubmit(b, name))
	}
	if strings.Contains(name, "Bookmaker") {
		deps = append(deps, bookmaker(b, name, compileTaskName))
	}

	// Common assets needed by the remaining bots.

	pkgs := []*specs.CipdPackage{}

	// Bots whose assets come in via isolated CIPD dependency tasks don't
	// also get them as CIPD packages.
	if deps := getIsolatedCIPDDeps(parts); len(deps) == 0 {
		pkgs = []*specs.CipdPackage{
			b.MustGetCipdPackageFromAsset("skimage"),
			b.MustGetCipdPackageFromAsset("skp"),
			b.MustGetCipdPackageFromAsset("svg"),
		}
	}

	// Linux-specific packages, selected by task name.
	if strings.Contains(name, "Ubuntu") || strings.Contains(name, "Debian") {
		if strings.Contains(name, "SAN") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("clang_linux"))
		}
		if strings.Contains(name, "Vulkan") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("linux_vulkan_sdk"))
		}
		if strings.Contains(name, "Intel") && strings.Contains(name, "GPU") {
			pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("mesa_intel_driver_linux"))
		}
		if strings.Contains(name, "OpenCL") {
			pkgs = append(pkgs,
				b.MustGetCipdPackageFromAsset("opencl_ocl_icd_linux"),
				b.MustGetCipdPackageFromAsset("opencl_intel_neo_linux"),
			)
		}
	}
	if strings.Contains(name, "ProcDump") {
		pkgs = append(pkgs, b.MustGetCipdPackageFromAsset("procdump_win"))
	}
	if strings.Contains(name, "CanvasKit") || strings.Contains(name, "LottieWeb") || strings.Contains(name, "PathKit") {
		// Docker-based tests that don't need the standard CIPD assets
		pkgs = []*specs.CipdPackage{}
	}

	// Test bots.
	if parts["role"] == "Test" {
		deps = append(deps, test(b, name, parts, compileTaskName, pkgs))
	}

	// Perf bots.
	if parts["role"] == "Perf" {
		deps = append(deps, perf(b, name, parts, compileTaskName, pkgs))
	}

	// Calmbench bots.
	if parts["role"] == "Calmbench" {
		deps = append(deps, calmbench(b, name, parts, compileTaskName, compileParentName))
	}

	// BuildStats bots. This computes things like binary size.
	if parts["role"] == "BuildStats" {
		deps = append(deps, buildstats(b, name, parts, compileTaskName))
	}

	// Add the Job spec.
	j := &specs.JobSpec{
		Priority:  priority,
		TaskSpecs: deps,
		Trigger:   specs.TRIGGER_ANY_BRANCH,
	}
	// The trigger is derived from naming conventions; default is any branch.
	if strings.Contains(name, "-Nightly-") {
		j.Trigger = specs.TRIGGER_NIGHTLY
	} else if strings.Contains(name, "-Weekly-") {
		j.Trigger = specs.TRIGGER_WEEKLY
	} else if strings.Contains(name, "Flutter") || strings.Contains(name, "CommandBuffer") {
		j.Trigger = specs.TRIGGER_MASTER_ONLY
	} else if strings.Contains(name, "-OnDemand-") || strings.Contains(name, "Android_Framework") {
		j.Trigger = specs.TRIGGER_ON_DEMAND
	}
	b.MustAddJob(name, j)
}
1427
1428func loadJson(flag *string, defaultFlag string, val interface{}) {
1429	if *flag == "" {
1430		*flag = defaultFlag
1431	}
1432	b, err := ioutil.ReadFile(*flag)
1433	if err != nil {
1434		glog.Fatal(err)
1435	}
1436	if err := json.Unmarshal(b, val); err != nil {
1437		glog.Fatal(err)
1438	}
1439}
1440
// Regenerate the tasks.json file.
func main() {
	b := specs.MustNewTasksCfgBuilder()
	b.SetAssetsDir(*assetsDir)
	infraBots := path.Join(b.CheckoutRoot(), "infra", "bots")

	// Load the jobs from a JSON file.
	loadJson(jobsFile, path.Join(infraBots, "jobs.json"), &JOBS)

	// Load general config information from a JSON file.
	loadJson(cfgFile, path.Join(infraBots, "cfg.json"), &CONFIG)

	// Create the JobNameSchema.
	if *builderNameSchemaFile == "" {
		*builderNameSchemaFile = path.Join(b.CheckoutRoot(), "infra", "bots", "recipe_modules", "builder_name_schema", "builder_name_schema.json")
	}
	schema, err := NewJobNameSchema(*builderNameSchemaFile)
	if err != nil {
		glog.Fatal(err)
	}
	// jobNameSchema is package-level; the task generators above use it to
	// parse and build job names.
	jobNameSchema = schema

	// Create Tasks and Jobs.
	for _, name := range JOBS {
		process(b, name)
	}

	b.MustFinish()
}
1470
1471// TODO(borenet): The below really belongs in its own file, probably next to the
1472// builder_name_schema.json file.
1473
// schema is a sub-struct of JobNameSchema describing the name parts which
// make up a single role's job names.
type schema struct {
	// Keys are the name parts which must always be present, in order.
	Keys         []string `json:"keys"`
	// OptionalKeys are name parts which may be omitted, in order.
	OptionalKeys []string `json:"optional_keys"`
	// RecurseRoles are sub-role names whose own schemas are applied
	// recursively when they appear in a name.
	RecurseRoles []string `json:"recurse_roles"`
}
1480
// JobNameSchema is a struct used for (de)constructing Job names in a
// predictable format.
type JobNameSchema struct {
	// Schema maps a role name to the schema for that role's name parts.
	Schema map[string]*schema `json:"builder_name_schema"`
	// Sep is the separator joining name parts (e.g. "-").
	Sep    string             `json:"builder_name_sep"`
}
1487
1488// NewJobNameSchema returns a JobNameSchema instance based on the given JSON
1489// file.
1490func NewJobNameSchema(jsonFile string) (*JobNameSchema, error) {
1491	var rv JobNameSchema
1492	f, err := os.Open(jsonFile)
1493	if err != nil {
1494		return nil, err
1495	}
1496	defer util.Close(f)
1497	if err := json.NewDecoder(f).Decode(&rv); err != nil {
1498		return nil, err
1499	}
1500	return &rv, nil
1501}
1502
// ParseJobName splits the given Job name into its component parts, according
// to the schema.
//
// The returned map contains "role" (plus "sub-role-N" entries for recursive
// roles) and one entry per schema key consumed from the name.
func (s *JobNameSchema) ParseJobName(n string) (map[string]string, error) {
	// popFront removes and returns the first element of items, erroring if
	// the name has run out of parts.
	popFront := func(items []string) (string, []string, error) {
		if len(items) == 0 {
			return "", nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
		}
		return items[0], items[1:], nil
	}

	result := map[string]string{}

	// parse consumes the name parts for the given role (recursing into
	// sub-roles) and records them into result.
	var parse func(int, string, []string) ([]string, error)
	parse = func(depth int, role string, parts []string) ([]string, error) {
		// NOTE: this local s deliberately shadows the receiver inside the
		// closure.
		s, ok := s.Schema[role]
		if !ok {
			return nil, fmt.Errorf("Invalid job name; %q is not a valid role.", role)
		}
		if depth == 0 {
			result["role"] = role
		} else {
			result[fmt.Sprintf("sub-role-%d", depth)] = role
		}
		var err error
		// Required keys consume parts in schema order.
		for _, key := range s.Keys {
			var value string
			value, parts, err = popFront(parts)
			if err != nil {
				return nil, err
			}
			result[key] = value
		}
		// Recurse into a sub-role when the next part names one.
		for _, subRole := range s.RecurseRoles {
			if len(parts) > 0 && parts[0] == subRole {
				parts, err = parse(depth+1, parts[0], parts[1:])
				if err != nil {
					return nil, err
				}
			}
		}
		// Optional keys consume any remaining parts, in schema order.
		for _, key := range s.OptionalKeys {
			if len(parts) > 0 {
				var value string
				value, parts, err = popFront(parts)
				if err != nil {
					return nil, err
				}
				result[key] = value
			}
		}
		// Leftover parts mean the name doesn't fit the schema.
		if len(parts) > 0 {
			return nil, fmt.Errorf("Invalid job name: %s (too many parts)", n)
		}
		return parts, nil
	}

	split := strings.Split(n, s.Sep)
	if len(split) < 2 {
		return nil, fmt.Errorf("Invalid job name: %s (not enough parts)", n)
	}
	role := split[0]
	split = split[1:]
	_, err := parse(0, role, split)
	return result, err
}
1568
// MakeJobName assembles the given parts of a Job name, according to the schema.
// It is intended as the inverse of ParseJobName.
func (s *JobNameSchema) MakeJobName(parts map[string]string) (string, error) {
	rvParts := make([]string, 0, len(parts))

	// process consumes entries from parts for the role at the given depth
	// (recursing into sub-roles), appending name pieces to rvParts.
	var process func(int, map[string]string) (map[string]string, error)
	process = func(depth int, parts map[string]string) (map[string]string, error) {
		roleKey := "role"
		if depth != 0 {
			roleKey = fmt.Sprintf("sub-role-%d", depth)
		}
		role, ok := parts[roleKey]
		if !ok {
			return nil, fmt.Errorf("Invalid job parts; missing key %q", roleKey)
		}

		// NOTE: this local s deliberately shadows the receiver inside the
		// closure.
		s, ok := s.Schema[role]
		if !ok {
			return nil, fmt.Errorf("Invalid job parts; unknown role %q", role)
		}
		rvParts = append(rvParts, role)
		delete(parts, roleKey)

		// Required keys, in schema order.
		for _, key := range s.Keys {
			value, ok := parts[key]
			if !ok {
				return nil, fmt.Errorf("Invalid job parts; missing %q", key)
			}
			rvParts = append(rvParts, value)
			delete(parts, key)
		}

		if len(s.RecurseRoles) > 0 {
			// A sub-role entry is required whenever the schema declares any.
			subRoleKey := fmt.Sprintf("sub-role-%d", depth+1)
			subRole, ok := parts[subRoleKey]
			if !ok {
				return nil, fmt.Errorf("Invalid job parts; missing %q", subRoleKey)
			}
			rvParts = append(rvParts, subRole)
			// NOTE(review): the sub-role key is deleted here, but the
			// recursive process() call below looks up this same key again
			// via its roleKey and would report it missing -- this path
			// looks broken for recursive roles; TODO confirm with a test.
			delete(parts, subRoleKey)
			found := false
			for _, recurseRole := range s.RecurseRoles {
				if recurseRole == subRole {
					found = true
					var err error
					parts, err = process(depth+1, parts)
					if err != nil {
						return nil, err
					}
					break
				}
			}
			if !found {
				return nil, fmt.Errorf("Invalid job parts; unknown sub-role %q", subRole)
			}
		}
		// Optional keys are appended only when present.
		for _, key := range s.OptionalKeys {
			if value, ok := parts[key]; ok {
				rvParts = append(rvParts, value)
				delete(parts, key)
			}
		}
		// Any leftover entries don't belong to this schema.
		if len(parts) > 0 {
			return nil, fmt.Errorf("Invalid job parts: too many parts: %v", parts)
		}
		return parts, nil
	}

	// Copy the parts map, so that we can modify at will.
	partsCpy := make(map[string]string, len(parts))
	for k, v := range parts {
		partsCpy[k] = v
	}
	if _, err := process(0, partsCpy); err != nil {
		return "", err
	}
	return strings.Join(rvParts, s.Sep), nil
}
1646