• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1// Copyright 2020 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// This executable is meant to be a general way to gather perf data using puppeteer. The logic
6// (e.g. what bench to run, how to process that particular output) is selected using the ExtraConfig
7// part of the task name.
8package main
9
10import (
11	"context"
12	"encoding/json"
13	"flag"
14	"fmt"
15	"io/ioutil"
16	"math"
17	"os"
18	"path/filepath"
19	"sort"
20
21	"go.skia.org/infra/go/exec"
22	"go.skia.org/infra/go/skerr"
23	"go.skia.org/infra/task_driver/go/lib/os_steps"
24	"go.skia.org/infra/task_driver/go/td"
25)
26
// perfKeyWebGLVersion is the perf trace key recording the major WebGL
// version requested for the drawing context ("1" or "2").
const perfKeyWebGLVersion = "webgl_version"
28
// main is the task driver entry point: it reads the task metadata and
// trace-key flags, installs the benchmark's node dependencies, runs the
// puppeteer canvas benchmark against the supplied CanvasKit build, and
// writes the summarized results as a perf JSON file under output_path.
func main() {
	var (
		// Required properties for this task.
		projectID     = flag.String("project_id", "", "ID of the Google Cloud project.")
		taskName      = flag.String("task_name", "", "Name of the task.")
		benchmarkPath = flag.String("benchmark_path", "", "Path to location of the benchmark files (e.g. //tools/perf-puppeteer).")
		outputPath    = flag.String("output_path", "", "Perf Output will be produced here")
		gitHash       = flag.String("git_hash", "", "Git hash this data corresponds to")
		taskID        = flag.String("task_id", "", "task id this data was generated on")
		nodeBinPath   = flag.String("node_bin_path", "", "Path to the node bin directory (should have npm also). This directory *must* be on the PATH when this executable is called, otherwise, the wrong node or npm version may be found (e.g. the one on the system), even if we are explicitly calling npm with the absolute path.")

		// These flags feed into the perf trace keys associated with the output data.
		osTrace            = flag.String("os_trace", "", "OS this is running on.")
		modelTrace         = flag.String("model_trace", "", "Description of host machine.")
		cpuOrGPUTrace      = flag.String("cpu_or_gpu_trace", "", "If this is a CPU or GPU configuration.")
		cpuOrGPUValueTrace = flag.String("cpu_or_gpu_value_trace", "", "The hardware of this CPU/GPU")
		webGLVersion       = flag.String("webgl_version", "", "Major WebGl version to use when creating gl drawing context. 1 or 2")

		// Flags that may be required for certain configs
		canvaskitBinPath = flag.String("canvaskit_bin_path", "", "The location of a canvaskit.js and canvaskit.wasm")

		// Debugging flags.
		local       = flag.Bool("local", false, "True if running locally (as opposed to on the bots)")
		outputSteps = flag.String("o", "", "If provided, dump a JSON blob of step data to the given file. Prints to stdout if '-' is given.")
	)

	// Setup. The deferred EndRun reports the run's step data when main returns.
	ctx := td.StartRun(projectID, taskID, taskName, outputSteps, local)
	defer td.EndRun(ctx)

	// Trace keys describing the machine/configuration this data came from.
	keys := map[string]string{
		"os":                *osTrace,
		"model":             *modelTrace,
		perfKeyCpuOrGPU:     *cpuOrGPUTrace,
		"cpu_or_gpu_value":  *cpuOrGPUValueTrace,
		perfKeyWebGLVersion: *webGLVersion,
	}

	outputWithoutResults, err := makePerfObj(*gitHash, *taskID, os.Getenv("SWARMING_BOT_ID"), keys)
	if err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}
	// Absolute paths work more consistently than relative paths.
	nodeBinAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *nodeBinPath, "node_bin_path")
	benchmarkAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *benchmarkPath, "benchmark_path")
	canvaskitBinAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *canvaskitBinPath, "canvaskit_bin_path")
	outputAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *outputPath, "output_path")

	// Install node dependencies and create the benchmark's out/ directory.
	if err := setup(ctx, benchmarkAbsPath, nodeBinAbsPath); err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}

	// Run the benchmark; its raw output lands in <benchmark>/out/perf.json.
	if err := benchCanvas(ctx, outputWithoutResults, benchmarkAbsPath, canvaskitBinAbsPath, nodeBinAbsPath); err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}

	// outputFile name should be unique between tasks, so as to avoid having duplicate name files
	// uploaded to GCS.
	outputFile := filepath.Join(outputAbsPath, fmt.Sprintf("perf-%s.json", *taskID))
	if err := processFramesData(ctx, outputWithoutResults, benchmarkAbsPath, outputFile); err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}
}
92
// perfKeyCpuOrGPU is the perf trace key recording whether the benchmark ran
// in a CPU ("CPU") or GPU configuration.
const perfKeyCpuOrGPU = "cpu_or_gpu"
94
95func makePerfObj(gitHash, taskID, machineID string, keys map[string]string) (perfJSONFormat, error) {
96	rv := perfJSONFormat{}
97	if gitHash == "" {
98		return rv, skerr.Fmt("Must provide --git_hash")
99	}
100	if taskID == "" {
101		return rv, skerr.Fmt("Must provide --task_id")
102	}
103	rv.GitHash = gitHash
104	rv.SwarmingTaskID = taskID
105	rv.SwarmingMachineID = machineID
106	rv.Key = keys
107	rv.Key["arch"] = "wasm"
108	rv.Key["browser"] = "Chromium"
109	rv.Key["configuration"] = "Release"
110	rv.Key["extra_config"] = "CanvasPerf"
111	rv.Key["binary"] = "CanvasKit"
112	rv.Results = map[string]map[string]perfResult{}
113	return rv, nil
114}
115
116func setup(ctx context.Context, benchmarkPath, nodeBinPath string) error {
117	ctx = td.StartStep(ctx, td.Props("setup").Infra())
118	defer td.EndStep(ctx)
119
120	if _, err := exec.RunCwd(ctx, benchmarkPath, filepath.Join(nodeBinPath, "npm"), "ci"); err != nil {
121		return td.FailStep(ctx, skerr.Wrap(err))
122	}
123
124	if err := os.MkdirAll(filepath.Join(benchmarkPath, "out"), 0777); err != nil {
125		return td.FailStep(ctx, skerr.Wrap(err))
126	}
127	return nil
128}
129
130// benchCanvas runs the puppeteer canvas_perf_driver.html test and parses the results.
131func benchCanvas(ctx context.Context, perf perfJSONFormat, benchmarkPath, canvaskitBinPath, nodeBinPath string) error {
132	ctx = td.StartStep(ctx, td.Props("perf canvas tests"))
133	defer td.EndStep(ctx)
134
135	err := td.Do(ctx, td.Props("Benchmark Canvas"), func(ctx context.Context) error {
136		// See comment in setup about why we specify the absolute path for node.
137		args := []string{filepath.Join(nodeBinPath, "node"),
138			"perf-canvaskit-with-puppeteer",
139			"--bench_html", "canvas_perf.html",
140			"--canvaskit_js", filepath.Join(canvaskitBinPath, "canvaskit.js"),
141			"--canvaskit_wasm", filepath.Join(canvaskitBinPath, "canvaskit.wasm"),
142			"--assets", "canvas_perf_assets", // relative path
143			"--output", filepath.Join(benchmarkPath, "out", "perf.json"),
144			"--timeout=300",
145		}
146		if perf.Key[perfKeyCpuOrGPU] != "CPU" {
147			args = append(args, "--use_gpu")
148			if perf.Key[perfKeyWebGLVersion] == "1" {
149				args = append(args, "--query_params webgl1")
150			}
151		}
152
153		_, err := exec.RunCwd(ctx, benchmarkPath, args...)
154		if err != nil {
155			return skerr.Wrap(err)
156		}
157		return nil
158	})
159	if err != nil {
160		return td.FailStep(ctx, skerr.Wrap(err))
161	}
162	return nil
163}
164
// perfJSONFormat describes the output file format: the task/machine
// metadata, the trace keys identifying this configuration, and the
// per-benchmark results.
type perfJSONFormat struct {
	GitHash           string            `json:"gitHash"`
	SwarmingTaskID    string            `json:"swarming_task_id"`
	SwarmingMachineID string            `json:"swarming_machine_id"`
	Key               map[string]string `json:"key"`
	// Maps bench name -> "config" -> result key -> value
	Results map[string]map[string]perfResult `json:"results"`
}
174
// perfResult maps a metric name (e.g. "avg_render_frame_ms") to its value.
type perfResult map[string]float32
176
// oneTestResult describes the input file format: the raw per-frame timing
// samples recorded by the benchmark for a single test. Field names suggest
// milliseconds — confirm against the benchmark's JS output.
type oneTestResult struct {
	WithoutFlushMS []float32 `json:"without_flush_ms"`
	WithFlushMS    []float32 `json:"with_flush_ms"`
	TotalFrameMS   []float32 `json:"total_frame_ms"`
}
183
184// processFramesData looks at the result of benchCanvas, computes summary data on
185// those files and adds them as Results into the provided perf object. The perf object is then
186// written in JSON format to outputPath.
187func processFramesData(ctx context.Context, perf perfJSONFormat, benchmarkPath, outputFilePath string) error {
188	perfJSONPath := filepath.Join(benchmarkPath, "out", "perf.json")
189	ctx = td.StartStep(ctx, td.Props("process perf output "+perfJSONPath))
190	defer td.EndStep(ctx)
191
192	err := td.Do(ctx, td.Props("Process "+perfJSONPath), func(ctx context.Context) error {
193		config := "software"
194		if perf.Key[perfKeyCpuOrGPU] != "CPU" {
195			config = "webgl2"
196			if perf.Key[perfKeyWebGLVersion] == "1" {
197				config = "webgl1"
198			}
199		}
200		b, err := os_steps.ReadFile(ctx, perfJSONPath)
201		if err != nil {
202			return skerr.Wrap(err)
203		}
204		var fileData map[string]oneTestResult
205		if err := json.Unmarshal(b, &fileData); err != nil {
206			return skerr.Wrap(err)
207		}
208
209		for name, item := range fileData {
210			metrics, err := calculatePerfFromTest(item) // item is a oneTestResult
211			if err != nil {
212				return skerr.Wrap(err)
213			}
214			perf.Results[name] = map[string]perfResult{
215				config: metrics,
216			}
217		}
218		return nil
219	})
220	if err != nil {
221		return td.FailStep(ctx, skerr.Wrap(err))
222	}
223
224	err = td.Do(ctx, td.Props("Writing perf JSON file to "+outputFilePath), func(ctx context.Context) error {
225		if err := os.MkdirAll(filepath.Dir(outputFilePath), 0777); err != nil {
226			return skerr.Wrap(err)
227		}
228		b, err := json.MarshalIndent(perf, "", "  ")
229		if err != nil {
230			return skerr.Wrap(err)
231		}
232		if err = ioutil.WriteFile(outputFilePath, b, 0666); err != nil {
233			return skerr.Wrap(err)
234		}
235		return nil
236	})
237	if err != nil {
238		return td.FailStep(ctx, skerr.Wrap(err))
239	}
240
241	return nil
242}
243
244// Computer averages and quantiles of the frame time results from one test.
245func calculatePerfFromTest(metrics oneTestResult) (map[string]float32, error) {
246	avgWithoutFlushMS, medianWithoutFlushMS, stddevWithoutFlushMS, _, _, _ := summarize(metrics.WithoutFlushMS)
247	avgWithFlushMS, medianWithFlushMS, stddevWithFlushMS, _, _, _ := summarize(metrics.WithFlushMS)
248	avgFrame, medFrame, stdFrame, percentile90Frame, percentile95Frame, percentile99Frame := summarize(metrics.TotalFrameMS)
249
250	rv := map[string]float32{
251		"avg_render_without_flush_ms":    avgWithoutFlushMS,
252		"median_render_without_flush_ms": medianWithoutFlushMS,
253		"stddev_render_without_flush_ms": stddevWithoutFlushMS,
254
255		"avg_render_with_flush_ms":    avgWithFlushMS,
256		"median_render_with_flush_ms": medianWithFlushMS,
257		"stddev_render_with_flush_ms": stddevWithFlushMS,
258
259		"avg_render_frame_ms":    avgFrame,
260		"median_render_frame_ms": medFrame,
261		"stddev_render_frame_ms": stdFrame,
262
263		// more detailed statistics on total frame times
264		"90th_percentile_frame_ms": percentile90Frame,
265		"95th_percentile_frame_ms": percentile95Frame,
266		"99th_percentile_frame_ms": percentile99Frame,
267	}
268	return rv, nil
269}
270
// summarize returns the average, median, population standard deviation, and
// the 90th, 95th and 99th percentiles of input, in that order. The input
// slice is never modified. An empty or nil input returns all zeros; the
// previous behavior panicked on the median index and produced a NaN average.
func summarize(input []float32) (float32, float32, float32, float32, float32, float32) {
	// Guard the empty case: the index math below would panic and the mean
	// would be 0/0.
	if len(input) == 0 {
		return 0, 0, 0, 0, 0, 0
	}

	// Make a copy of the data so we don't mutate the order of the original.
	sorted := make([]float32, len(input))
	copy(sorted, input)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i] < sorted[j]
	})

	var sum float32
	for _, v := range sorted {
		sum += v
	}
	avg := sum / float32(len(sorted))

	// Population (not sample) variance: divide by n.
	variance := float32(0)
	for i := 0; i < len(sorted); i++ {
		variance += (sorted[i] - avg) * (sorted[i] - avg)
	}
	stddev := float32(math.Sqrt(float64(variance / float32(len(sorted)))))

	// Nearest-rank style indices; (len*99)/100 < len, so these are in range.
	medIdx := (len(sorted) * 50) / 100
	percentile90Idx := (len(sorted) * 90) / 100
	percentile95Idx := (len(sorted) * 95) / 100
	percentile99Idx := (len(sorted) * 99) / 100

	return avg, sorted[medIdx], stddev, sorted[percentile90Idx], sorted[percentile95Idx], sorted[percentile99Idx]
}
293
// computeAverage returns the arithmetic mean of d, or 0 for an empty or nil
// slice (the previous behavior returned NaN from dividing zero by zero).
func computeAverage(d []float32) float32 {
	if len(d) == 0 {
		return 0
	}
	avg := float32(0)
	for i := 0; i < len(d); i++ {
		avg += d[i]
	}
	avg /= float32(len(d))
	return avg
}
302