1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <ctype.h>
9
10 #include "bench/nanobench.h"
11
12 #include "bench/AndroidCodecBench.h"
13 #include "bench/Benchmark.h"
14 #include "bench/CodecBench.h"
15 #include "bench/CodecBenchPriv.h"
16 #include "bench/GMBench.h"
17 #include "bench/MSKPBench.h"
18 #include "bench/RecordingBench.h"
19 #include "bench/ResultsWriter.h"
20 #include "bench/SKPAnimationBench.h"
21 #include "bench/SKPBench.h"
22 #include "bench/SkGlyphCacheBench.h"
23 #include "bench/SkSLBench.h"
24 #include "include/codec/SkAndroidCodec.h"
25 #include "include/codec/SkCodec.h"
26 #include "include/core/SkCanvas.h"
27 #include "include/core/SkData.h"
28 #include "include/core/SkGraphics.h"
29 #include "include/core/SkImageEncoder.h"
30 #include "include/core/SkPictureRecorder.h"
31 #include "include/core/SkString.h"
32 #include "include/core/SkSurface.h"
33 #include "include/core/SkTime.h"
34 #include "include/private/base/SkMacros.h"
35 #include "src/base/SkAutoMalloc.h"
36 #include "src/base/SkLeanWindows.h"
37 #include "src/core/SkColorSpacePriv.h"
38 #include "src/core/SkOSFile.h"
39 #include "src/core/SkTaskGroup.h"
40 #include "src/core/SkTraceEvent.h"
41 #include "src/utils/SkJSONWriter.h"
42 #include "src/utils/SkOSPath.h"
43 #include "src/utils/SkShaderUtils.h"
44 #include "tools/AutoreleasePool.h"
45 #include "tools/CrashHandler.h"
46 #include "tools/MSKPPlayer.h"
47 #include "tools/ProcStats.h"
48 #include "tools/Stats.h"
49 #include "tools/ToolUtils.h"
50 #include "tools/flags/CommonFlags.h"
51 #include "tools/flags/CommonFlagsConfig.h"
52 #include "tools/ios_utils.h"
53 #include "tools/trace/EventTracingPriv.h"
54 #include "tools/trace/SkDebugfTracer.h"
55
56 #if defined(SK_ENABLE_SVG)
57 #include "modules/svg/include/SkSVGDOM.h"
58 #include "modules/svg/include/SkSVGNode.h"
59 #endif
60
61 #ifdef SK_ENABLE_ANDROID_UTILS
62 #include "bench/BitmapRegionDecoderBench.h"
63 #include "client_utils/android/BitmapRegionDecoder.h"
64 #endif
65
66 #if defined(SK_GRAPHITE)
67 #include "include/gpu/graphite/Context.h"
68 #include "include/gpu/graphite/Recorder.h"
69 #include "include/gpu/graphite/Recording.h"
70 #include "tools/graphite/ContextFactory.h"
71 #include "tools/graphite/GraphiteTestContext.h"
72 #endif
73
74 #include <cinttypes>
75 #include <memory>
76 #include <optional>
77 #include <stdlib.h>
78 #include <thread>
79
80 extern bool gSkForceRasterPipelineBlitter;
81 extern bool gForceHighPrecisionRasterPipeline;
82 extern bool gUseSkVMBlitter;
83 extern bool gSkVMAllowJIT;
84 extern bool gSkVMJITViaDylib;
85
86 #include "src/utils/SkBlitterTraceCommon.h"
87 SK_BLITTER_TRACE_INIT
88
89 #ifndef SK_BUILD_FOR_WIN
90 #include <unistd.h>
91
92 #endif
93
94 #include "include/gpu/GrDirectContext.h"
95 #include "src/gpu/ganesh/GrCaps.h"
96 #include "src/gpu/ganesh/GrDirectContextPriv.h"
97 #include "src/gpu/ganesh/SkGr.h"
98 #include "tools/gpu/GrContextFactory.h"
99
100 using sk_gpu_test::ContextInfo;
101 using sk_gpu_test::GrContextFactory;
102 using sk_gpu_test::TestContext;
103
104 GrContextOptions grContextOpts;
105
106 static const int kAutoTuneLoops = 0;
107
108 static SkString loops_help_txt() {
109 SkString help;
110 help.printf("Number of times to run each bench. Set this to %d to auto-"
111 "tune for each bench. Timings are only reported when auto-tuning.",
112 kAutoTuneLoops);
113 return help;
114 }
115
116 static SkString to_string(int n) {
117 SkString str;
118 str.appendS32(n);
119 return str;
120 }
121
122 static DEFINE_int(loops, kAutoTuneLoops, loops_help_txt().c_str());
123
124 static DEFINE_int(samples, 10, "Number of samples to measure for each bench.");
125 static DEFINE_int(ms, 0, "If >0, run each bench for this many ms instead of obeying --samples.");
126 static DEFINE_int(overheadLoops, 100000, "Loops to estimate timer overhead.");
127 static DEFINE_double(overheadGoal, 0.0001,
128 "Loop until timer overhead is at most this fraction of our measurments.");
129 static DEFINE_double(gpuMs, 5, "Target bench time in milliseconds for GPU.");
130 static DEFINE_int(gpuFrameLag, 5,
131 "If unknown, estimated maximum number of frames GPU allows to lag.");
132
133 static DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
134 static DEFINE_int(maxCalibrationAttempts, 3,
135 "Try up to this many times to guess loops for a bench, or skip the bench.");
136 static DEFINE_int(maxLoops, 1000000, "Never run a bench more times than this.");
137 static DEFINE_string(clip, "0,0,1000,1000", "Clip for SKPs.");
138 static DEFINE_string(scales, "1.0", "Space-separated scales for SKPs.");
139 static DEFINE_string(zoom, "1.0,0",
140 "Comma-separated zoomMax,zoomPeriodMs factors for a periodic SKP zoom "
141 "function that ping-pongs between 1.0 and zoomMax.");
142 static DEFINE_bool(bbh, true, "Build a BBH for SKPs?");
143 static DEFINE_bool(loopSKP, true, "Loop SKPs like we do for micro benches?");
144 static DEFINE_int(flushEvery, 10, "Flush --outResultsFile every Nth run.");
145 static DEFINE_bool(gpuStats, false, "Print GPU stats after each gpu benchmark?");
146 static DEFINE_bool(gpuStatsDump, false, "Dump GPU stats after each benchmark to json");
147 static DEFINE_bool(dmsaaStatsDump, false, "Dump DMSAA stats after each benchmark to json");
148 static DEFINE_bool(keepAlive, false, "Print a message every so often so that we don't time out");
149 static DEFINE_bool(csv, false, "Print status in CSV format");
150 static DEFINE_string(sourceType, "",
151 "Apply usual --match rules to source type: bench, gm, skp, image, etc.");
152 static DEFINE_string(benchType, "",
153 "Apply usual --match rules to bench type: micro, recording, "
154 "piping, playback, skcodec, etc.");
155
156 static DEFINE_bool(forceRasterPipeline, false, "sets gSkForceRasterPipelineBlitter");
157 static DEFINE_bool(forceRasterPipelineHP, false, "sets gSkForceRasterPipelineBlitter and gForceHighPrecisionRasterPipeline");
158 static DEFINE_bool(skvm, false, "sets gUseSkVMBlitter");
159 static DEFINE_bool(jit, true, "JIT SkVM?");
160 static DEFINE_bool(dylib, false, "JIT via dylib (much slower compile but easier to debug/profile)");
161
162 static DEFINE_bool2(pre_log, p, false,
163 "Log before running each test. May be incomprehensible when threading");
164
165 static DEFINE_bool(cpu, true, "Run CPU-bound work?");
166 static DEFINE_bool(gpu, true, "Run GPU-bound work?");
167 static DEFINE_bool(dryRun, false,
168 "just print the tests that would be run, without actually running them.");
169 static DEFINE_string(images, "",
170 "List of images and/or directories to decode. A directory with no images"
171 " is treated as a fatal error.");
172 static DEFINE_bool(simpleCodec, false,
173 "Runs of a subset of the codec tests, always N32, Premul or Opaque");
174
175 static DEFINE_string2(match, m, nullptr,
176 "[~][^]substring[$] [...] of name to run.\n"
177 "Multiple matches may be separated by spaces.\n"
178 "~ causes a matching name to always be skipped\n"
179 "^ requires the start of the name to match\n"
180 "$ requires the end of the name to match\n"
181 "^ and $ requires an exact match\n"
182 "If a name does not match any list entry,\n"
183 "it is skipped unless some list entry starts with ~");
184
185 static DEFINE_bool2(quiet, q, false, "if true, don't print status updates.");
186 static DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver.");
187
188
189 static DEFINE_string(skps, "skps", "Directory to read skps from.");
190 static DEFINE_string(mskps, "mskps", "Directory to read mskps from.");
191 static DEFINE_string(svgs, "", "Directory to read SVGs from, or a single SVG file.");
192 static DEFINE_string(texttraces, "", "Directory to read TextBlobTrace files from.");
193
194 static DEFINE_int_2(threads, j, -1,
195 "Run threadsafe tests on a threadpool with this many extra threads, "
196 "defaulting to one extra thread per core.");
197
198 static DEFINE_string2(writePath, w, "", "If set, write bitmaps here as .pngs.");
199
200 static DEFINE_string(key, "",
201 "Space-separated key/value pairs to add to JSON identifying this builder.");
202 static DEFINE_string(properties, "",
203 "Space-separated key/value pairs to add to JSON identifying this run.");
204
205 static DEFINE_bool(purgeBetweenBenches, false,
206 "Call SkGraphics::PurgeAllCaches() between each benchmark?");
207
208 static DEFINE_bool(splitPerfettoTracesByBenchmark, true,
209 "Create separate perfetto trace files for each benchmark?\n"
210 "Will only take effect if perfetto tracing is enabled. See --trace.");
211
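// Current wall-clock time in milliseconds; all bench timing below is based on this.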
212 static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
213
214 static SkString humanize(double ms) {
215 if (FLAGS_verbose) return SkStringPrintf("%" PRIu64, (uint64_t)(ms*1e6));
216 return HumanizeMs(ms);
217 }
218 #define HUMANIZE(ms) humanize(ms).c_str()
219
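// Raster-backend configs get a CPU surface allocated here; GPU and Graphite targets override init() below.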
220 bool Target::init(SkImageInfo info, Benchmark* bench) {
221 if (Benchmark::kRaster_Backend == config.backend) {
222 this->surface = SkSurface::MakeRaster(info);
223 if (!this->surface) {
224 return false;
225 }
226 }
227 return true;
228 }
229 bool Target::capturePixels(SkBitmap* bmp) {
230 SkCanvas* canvas = this->getCanvas();
231 if (!canvas) {
232 return false;
233 }
234 bmp->allocPixels(canvas->imageInfo());
235 if (!canvas->readPixels(*bmp, 0, 0)) {
236 SkDebugf("Can't read canvas pixels.\n");
237 return false;
238 }
239 return true;
240 }
241
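// Target that renders through Ganesh. Owns its own GrContextFactory so every bench/config pair starts with a fresh context.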
242 struct GPUTarget : public Target {
243 explicit GPUTarget(const Config& c) : Target(c) {}
244 ContextInfo contextInfo;
245 std::unique_ptr<GrContextFactory> factory;
246
247 ~GPUTarget() override {
248 // For Vulkan we need to release all our refs to the GrContext before destroying the Vulkan
249 // context, which happens at the end of this destructor. Thus we need to release the surface
250 // here, since it holds a ref to the GrContext.
251 surface.reset();
252 }
253
254 void setup() override {
255 this->contextInfo.testContext()->makeCurrent();
256 // Make sure we're done with whatever came before.
257 this->contextInfo.testContext()->finish();
258 }
259 void endTiming() override {
260 if (this->contextInfo.testContext()) {
261 this->contextInfo.testContext()->flushAndWaitOnSync(contextInfo.directContext());
262 }
263 }
264 void syncCPU() override { this->contextInfo.testContext()->finish(); }
265
266 bool needsFrameTiming(int* maxFrameLag) const override {
267 if (!this->contextInfo.testContext()->getMaxGpuFrameLag(maxFrameLag)) {
268 // Frame lag is unknown.
269 *maxFrameLag = FLAGS_gpuFrameLag;
270 }
271 return true;
272 }
273 bool init(SkImageInfo info, Benchmark* bench) override {
274 GrContextOptions options = grContextOpts;
275 bench->modifyGrContextOptions(&options);
276 this->factory = std::make_unique<GrContextFactory>(options);
277 SkSurfaceProps props(this->config.surfaceFlags, kRGB_H_SkPixelGeometry);
278 this->surface = SkSurface::MakeRenderTarget(
279 this->factory->get(this->config.ctxType, this->config.ctxOverrides),
280 skgpu::Budgeted::kNo,
281 info,
282 this->config.samples,
283 &props);
284 this->contextInfo =
285 this->factory->getContextInfo(this->config.ctxType, this->config.ctxOverrides);
286 if (!this->surface) {
287 return false;
288 }
289 if (!this->contextInfo.testContext()->fenceSyncSupport()) {
290 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
291 "Timings might not be accurate.\n", this->config.name.c_str());
292 }
293 return true;
294 }
295
296 void dumpStats() override {
297 auto context = this->contextInfo.directContext();
298
299 context->priv().printCacheStats();
300 context->priv().printGpuStats();
301 context->priv().printContextStats();
302 }
303 };
304
305 #if defined(SK_GRAPHITE)
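// Target that renders through Graphite. Work is submitted by snapping a Recording from the Recorder and inserting it into the Context.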
306 struct GraphiteTarget : public Target {
307 explicit GraphiteTarget(const Config& c) : Target(c) {}
308 using TestContext = skiatest::graphite::GraphiteTestContext;
309 using ContextFactory = skiatest::graphite::ContextFactory;
310
311 std::unique_ptr<ContextFactory> factory;
312
313 TestContext* testContext;
314 skgpu::graphite::Context* context;
315 std::unique_ptr<skgpu::graphite::Recorder> recorder;
316
317 ~GraphiteTarget() override {}
318
319 void setup() override {}
320
321 void endTiming() override {
322 if (context && recorder) {
323 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
324 if (recording) {
325 this->testContext->submitRecordingAndWaitOnSync(this->context, recording.get());
326 }
327 }
328 }
329 void syncCPU() override {
330 if (context && recorder) {
331 // TODO: have a way to sync work without submitting a Recording, which is currently
332 // required. Probably need to get to the point where the backend command buffers are
333 // stored on the Context and not on Recordings before this is feasible.
334 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
335 if (recording) {
336 skgpu::graphite::InsertRecordingInfo info;
337 info.fRecording = recording.get();
338 this->context->insertRecording(info);
339 }
340 this->context->submit(skgpu::graphite::SyncToCpu::kYes);
341 }
342 }
343
344 bool needsFrameTiming(int* maxFrameLag) const override {
345 SkAssertResult(this->testContext->getMaxGpuFrameLag(maxFrameLag));
346 return true;
347 }
348 bool init(SkImageInfo info, Benchmark* bench) override {
349 GrContextOptions options = grContextOpts;
350 bench->modifyGrContextOptions(&options);
351 // TODO: We should merge Ganesh and Graphite context options and then actually use the
352 // context options when we make the factory here.
353 this->factory = std::make_unique<ContextFactory>();
354
355 auto [testCtx, ctx] = this->factory->getContextInfo(this->config.ctxType);
356 if (!ctx) {
357 return false;
358 }
359 this->testContext = testCtx;
360 this->context = ctx;
361
362 this->recorder = this->context->makeRecorder(ToolUtils::CreateTestingRecorderOptions());
363 if (!this->recorder) {
364 return false;
365 }
366
367 this->surface = SkSurface::MakeGraphite(this->recorder.get(), info);
368 if (!this->surface) {
369 return false;
370 }
371 // TODO: get fence stuff working
372 #if 0
373 if (!this->contextInfo.testContext()->fenceSyncSupport()) {
374 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
375 "Timings might not be accurate.\n", this->config.name.c_str());
376 }
377 #endif
378 return true;
379 }
380
381 void dumpStats() override {
382 }
383 };
384 #endif // SK_GRAPHITE
385
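// Draws the bench 'loops' times on the target and returns the elapsed wall-clock time in ms, including whatever flush/sync the target performs in endTiming().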
386 static double time(int loops, Benchmark* bench, Target* target) {
387 SkCanvas* canvas = target->getCanvas();
388 if (canvas) {
389 canvas->clear(SK_ColorWHITE);
390 }
391 bench->preDraw(canvas);
392 double start = now_ms();
393 canvas = target->beginTiming(canvas);
394
395 SK_BLITTER_TRACE_LOCAL_SETUP;
396 bench->draw(loops, canvas);
397 SK_BLITTER_TRACE_LOCAL_TEARDOWN;
398
399 target->endTiming();
400 double elapsed = now_ms() - start;
401 bench->postDraw(canvas);
402 return elapsed;
403 }
404
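// Averages the cost of a back-to-back pair of now_ms() calls over FLAGS_overheadLoops iterations; setup_cpu_bench() uses this to pick loop counts that keep timer overhead negligible.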
405 static double estimate_timer_overhead() {
406 double overhead = 0;
407 for (int i = 0; i < FLAGS_overheadLoops; i++) {
408 double start = now_ms();
409 overhead += now_ms() - start;
410 }
411 return overhead / FLAGS_overheadLoops;
412 }
413
414 static int detect_forever_loops(int loops) {
415 // look for a magic run-forever value
416 if (loops < 0) {
417 loops = SK_MaxS32;
418 }
419 return loops;
420 }
421
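// Keeps an auto-tuned loop count within [1, FLAGS_maxLoops], warning if it has to clamp.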
422 static int clamp_loops(int loops) {
423 if (loops < 1) {
424 SkDebugf("ERROR: clamping loops from %d to 1. "
425 "There's probably something wrong with the bench.\n", loops);
426 return 1;
427 }
428 if (loops > FLAGS_maxLoops) {
429 SkDebugf("WARNING: clamping loops from %d to FLAGS_maxLoops, %d.\n", loops, FLAGS_maxLoops);
430 return FLAGS_maxLoops;
431 }
432 return loops;
433 }
434
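// Captures the target's pixels and writes them to 'filename' as a PNG, creating the directory if needed. Returns false on any failure.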
435 static bool write_canvas_png(Target* target, const SkString& filename) {
436
437 if (filename.isEmpty()) {
438 return false;
439 }
440 if (target->getCanvas() &&
441 kUnknown_SkColorType == target->getCanvas()->imageInfo().colorType()) {
442 return false;
443 }
444
445 SkBitmap bmp;
446
447 if (!target->capturePixels(&bmp)) {
448 return false;
449 }
450
451 SkString dir = SkOSPath::Dirname(filename.c_str());
452 if (!sk_mkdir(dir.c_str())) {
453 SkDebugf("Can't make dir %s.\n", dir.c_str());
454 return false;
455 }
456 SkFILEWStream stream(filename.c_str());
457 if (!stream.isValid()) {
458 SkDebugf("Can't write %s.\n", filename.c_str());
459 return false;
460 }
461 if (!SkEncodeImage(&stream, bmp, SkEncodedImageFormat::kPNG, 100)) {
462 SkDebugf("Can't encode a PNG.\n");
463 return false;
464 }
465 return true;
466 }
467
468 static int kFailedLoops = -2;
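// Picks the loop count for a CPU bench: the fixed --loops value, or an auto-tuned count chosen so that timer overhead stays under --overheadGoal (see the derivation below). Returns kFailedLoops if calibration fails.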
469 static int setup_cpu_bench(const double overhead, Target* target, Benchmark* bench) {
470 // First figure out approximately how many loops of bench it takes to make overhead negligible.
471 double bench_plus_overhead = 0.0;
472 int round = 0;
473 int loops = bench->calculateLoops(FLAGS_loops);
474 if (kAutoTuneLoops == loops) {
475 while (bench_plus_overhead < overhead) {
476 if (round++ == FLAGS_maxCalibrationAttempts) {
477 SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
478 bench->getUniqueName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
479 return kFailedLoops;
480 }
481 bench_plus_overhead = time(1, bench, target);
482 }
483 }
484
485 // Later we'll just start and stop the timer once but loop N times.
486 // We'll pick N to make timer overhead negligible:
487 //
488 // overhead
489 // ------------------------- < FLAGS_overheadGoal
490 // overhead + N * Bench Time
491 //
492 // where bench_plus_overhead ~=~ overhead + Bench Time.
493 //
494 // Doing some math, we get:
495 //
496 // (overhead / FLAGS_overheadGoal) - overhead
497 // ------------------------------------------ < N
498 // bench_plus_overhead - overhead
499 //
500 // Luckily, this also works well in practice. :)
501 if (kAutoTuneLoops == loops) {
502 const double numer = overhead / FLAGS_overheadGoal - overhead;
503 const double denom = bench_plus_overhead - overhead;
504 loops = (int)ceil(numer / denom);
505 loops = clamp_loops(loops);
506 } else {
507 loops = detect_forever_loops(loops);
508 }
509
510 return loops;
511 }
512
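// Picks the loop count for a GPU bench: doubles the count until one timed frame reaches --gpuMs, scales back linearly, then warms up for maxGpuFrameLag frames so steady-state frames are measured.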
513 static int setup_gpu_bench(Target* target, Benchmark* bench, int maxGpuFrameLag) {
514 // First, figure out how many loops it'll take to get a frame up to FLAGS_gpuMs.
515 int loops = bench->calculateLoops(FLAGS_loops);
516 if (kAutoTuneLoops == loops) {
517 loops = 1;
518 double elapsed = 0;
519 do {
520 if (1<<30 == loops) {
521 // We're about to wrap. Something's wrong with the bench.
522 loops = 0;
523 break;
524 }
525 loops *= 2;
526 // If the GPU lets frames lag at all, we need to make sure we're timing
527 // _this_ round, not still timing last round.
528 for (int i = 0; i < maxGpuFrameLag; i++) {
529 elapsed = time(loops, bench, target);
530 }
531 } while (elapsed < FLAGS_gpuMs);
532
533 // We've overshot at least a little. Scale back linearly.
534 loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);
535 loops = clamp_loops(loops);
536
537 // Make sure we're not still timing our calibration.
538 target->syncCPU();
539 } else {
540 loops = detect_forever_loops(loops);
541 }
542 // Pretty much the same deal as the calibration: do some warmup to make
543 // sure we're timing steady-state pipelined frames.
544 for (int i = 0; i < maxGpuFrameLag; i++) {
545 time(loops, bench, target);
546 }
547
548 return loops;
549 }
550
551 #define kBogusContextType GrContextFactory::kGL_ContextType
552 #define kBogusContextOverrides GrContextFactory::ContextOverrides::kNone
553
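// Translates one parsed --config entry into a Config, verifying that the backend is enabled and that a matching context and sample count are actually available; returns std::nullopt to skip the config.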
554 static std::optional<Config> create_config(const SkCommandLineConfig* config) {
555 if (const auto* gpuConfig = config->asConfigGpu()) {
556 if (!FLAGS_gpu) {
557 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
558 return std::nullopt;
559 }
560
561 const auto ctxType = gpuConfig->getContextType();
562 const auto ctxOverrides = gpuConfig->getContextOverrides();
563 const auto sampleCount = gpuConfig->getSamples();
564 const auto colorType = gpuConfig->getColorType();
565 if (gpuConfig->getSurfType() != SkCommandLineConfigGpu::SurfType::kDefault) {
566 SkDebugf("This tool only supports the default surface type.");
567 return std::nullopt;
568 }
569
570 GrContextFactory factory(grContextOpts);
571 if (const auto ctx = factory.get(ctxType, ctxOverrides)) {
572 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
573 int supportedSampleCount =
574 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
575 if (sampleCount != supportedSampleCount) {
576 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
577 config->getTag().c_str(),
578 sampleCount);
579 return std::nullopt;
580 }
581 } else {
582 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
583 return std::nullopt;
584 }
585
586 return Config{gpuConfig->getTag(),
587 Benchmark::kGPU_Backend,
588 colorType,
589 kPremul_SkAlphaType,
590 config->refColorSpace(),
591 sampleCount,
592 ctxType,
593 ctxOverrides,
594 gpuConfig->getSurfaceFlags()};
595 }
596 #if defined(SK_GRAPHITE)
597 if (const auto* gpuConfig = config->asConfigGraphite()) {
598 if (!FLAGS_gpu) {
599 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
600 return std::nullopt;
601 }
602
603 const auto graphiteCtxType = gpuConfig->getContextType();
604 const auto sampleCount = 1; // TODO: gpuConfig->getSamples();
605 const auto colorType = gpuConfig->getColorType();
606
607 using ContextFactory = skiatest::graphite::ContextFactory;
608
609 ContextFactory factory{};
610 auto [testContext, ctx] = factory.getContextInfo(graphiteCtxType);
611 if (ctx) {
612 // TODO: Add graphite ctx queries for supported sample count by color type.
613 #if 0
614 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
615 int supportedSampleCount =
616 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
617 if (sampleCount != supportedSampleCount) {
618 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
619 config->getTag().c_str(),
620 sampleCount);
621 return std::nullopt;
622 }
623 #else
624 if (sampleCount > 1) {
625 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
626 config->getTag().c_str(),
627 sampleCount);
628 return std::nullopt;
629 }
630 #endif
631 } else {
632 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
633 return std::nullopt;
634 }
635
636 return Config{gpuConfig->getTag(),
637 Benchmark::kGraphite_Backend,
638 colorType,
639 kPremul_SkAlphaType,
640 config->refColorSpace(),
641 sampleCount,
642 graphiteCtxType,
643 kBogusContextOverrides,
644 0};
645 }
646 #endif
647
648 #define CPU_CONFIG(name, backend, color, alpha) \
649 if (config->getBackend().equals(name)) { \
650 if (!FLAGS_cpu) { \
651 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str()); \
652 return std::nullopt; \
653 } \
654 return Config{SkString(name), \
655 Benchmark::backend, \
656 color, \
657 alpha, \
658 config->refColorSpace(), \
659 0, \
660 kBogusContextType, \
661 kBogusContextOverrides, \
662 0}; \
663 }
664
665 CPU_CONFIG("nonrendering", kNonRendering_Backend, kUnknown_SkColorType, kUnpremul_SkAlphaType)
666
667 CPU_CONFIG("a8", kRaster_Backend, kAlpha_8_SkColorType, kPremul_SkAlphaType)
668 CPU_CONFIG("565", kRaster_Backend, kRGB_565_SkColorType, kOpaque_SkAlphaType)
669 CPU_CONFIG("8888", kRaster_Backend, kN32_SkColorType, kPremul_SkAlphaType)
670 CPU_CONFIG("rgba", kRaster_Backend, kRGBA_8888_SkColorType, kPremul_SkAlphaType)
671 CPU_CONFIG("bgra", kRaster_Backend, kBGRA_8888_SkColorType, kPremul_SkAlphaType)
672 CPU_CONFIG("f16", kRaster_Backend, kRGBA_F16_SkColorType, kPremul_SkAlphaType)
673 CPU_CONFIG("srgba", kRaster_Backend, kSRGBA_8888_SkColorType, kPremul_SkAlphaType)
674
675 #undef CPU_CONFIG
676
677 SkDebugf("Unknown config '%s'.\n", config->getTag().c_str());
678 return std::nullopt;
679 }
680
681 // Append all configs that are enabled and supported.
682 void create_configs(SkTArray<Config>* configs) {
683 SkCommandLineConfigArray array;
684 ParseConfigs(FLAGS_config, &array);
685 for (int i = 0; i < array.size(); ++i) {
686 if (std::optional<Config> config = create_config(array[i].get())) {
687 configs->push_back(*config);
688 }
689 }
690
691 // If no configs were parsed, or none were requested explicitly (just defaults), we're okay.
692 if (array.size() == 0 || FLAGS_config.size() == 0 ||
693 // Otherwise, make sure that all specified configs have been created.
694 array.size() == configs->size()) {
695 return;
696 }
697 exit(1);
698 }
699
700 // disable warning : switch statement contains default but no 'case' labels
701 #if defined _WIN32
702 #pragma warning ( push )
703 #pragma warning ( disable : 4065 )
704 #endif
705
706 // If bench is enabled for config, returns a Target* for it, otherwise nullptr.
707 static Target* is_enabled(Benchmark* bench, const Config& config) {
708 if (!bench->isSuitableFor(config.backend)) {
709 return nullptr;
710 }
711
712 SkImageInfo info = SkImageInfo::Make(bench->getSize().fX, bench->getSize().fY,
713 config.color, config.alpha, config.colorSpace);
714
715 Target* target = nullptr;
716
717 switch (config.backend) {
718 case Benchmark::kGPU_Backend:
719 target = new GPUTarget(config);
720 break;
721 #if defined(SK_GRAPHITE)
722 case Benchmark::kGraphite_Backend:
723 target = new GraphiteTarget(config);
724 break;
725 #endif
726 default:
727 target = new Target(config);
728 break;
729 }
730
731 if (!target->init(info, bench)) {
732 delete target;
733 return nullptr;
734 }
735 return target;
736 }
737
738 #if defined _WIN32
739 #pragma warning ( pop )
740 #endif
741
742 #ifdef SK_ENABLE_ANDROID_UTILS
743 static bool valid_brd_bench(sk_sp<SkData> encoded, SkColorType colorType, uint32_t sampleSize,
744 uint32_t minOutputSize, int* width, int* height) {
745 auto brd = android::skia::BitmapRegionDecoder::Make(encoded);
746 if (nullptr == brd) {
747 // This indicates that subset decoding is not supported for this particular image format.
748 return false;
749 }
750
751 if (sampleSize * minOutputSize > (uint32_t) brd->width() || sampleSize * minOutputSize >
752 (uint32_t) brd->height()) {
753 // This indicates that the image is not large enough to decode a
754 // minOutputSize x minOutputSize subset at the given sampleSize.
755 return false;
756 }
757
758 // Set the image width and height. The calling code will use this to choose subsets to decode.
759 *width = brd->width();
760 *height = brd->height();
761 return true;
762 }
763 #endif
764
765 static void cleanup_run(Target* target) {
766 delete target;
767 }
768
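// Expands 'paths' into 'list': entries that already end in 'ext' are added directly, and directories are scanned for files with that extension.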
769 static void collect_files(const CommandLineFlags::StringArray& paths,
770 const char* ext,
771 SkTArray<SkString>* list) {
772 for (int i = 0; i < paths.size(); ++i) {
773 if (SkStrEndsWith(paths[i], ext)) {
774 list->push_back(SkString(paths[i]));
775 } else {
776 SkOSFile::Iter it(paths[i], ext);
777 SkString path;
778 while (it.next(&path)) {
779 list->push_back(SkOSPath::Join(paths[i], path.c_str()));
780 }
781 }
782 }
783 }
784
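// Enumerates every benchmark to run, in order: registered micro benches, GMs, text-blob traces, SKPs (recording, deserialization, playback, zoom animation), MSKPs, SVGs, and image decode benches.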
785 class BenchmarkStream {
786 public:
787 BenchmarkStream() : fBenches(BenchRegistry::Head())
788 , fGMs(skiagm::GMRegistry::Head()) {
789 collect_files(FLAGS_skps, ".skp", &fSKPs);
790 collect_files(FLAGS_mskps, ".mskp", &fMSKPs);
791 collect_files(FLAGS_svgs, ".svg", &fSVGs);
792 collect_files(FLAGS_texttraces, ".trace", &fTextBlobTraces);
793
794 if (4 != sscanf(FLAGS_clip[0], "%d,%d,%d,%d",
795 &fClip.fLeft, &fClip.fTop, &fClip.fRight, &fClip.fBottom)) {
796 SkDebugf("Can't parse %s from --clip as an SkIRect.\n", FLAGS_clip[0]);
797 exit(1);
798 }
799
800 for (int i = 0; i < FLAGS_scales.size(); i++) {
801 if (1 != sscanf(FLAGS_scales[i], "%f", &fScales.push_back())) {
802 SkDebugf("Can't parse %s from --scales as an SkScalar.\n", FLAGS_scales[i]);
803 exit(1);
804 }
805 }
806
807 if (2 != sscanf(FLAGS_zoom[0], "%f,%lf", &fZoomMax, &fZoomPeriodMs)) {
808 SkDebugf("Can't parse %s from --zoom as a zoomMax,zoomPeriodMs.\n", FLAGS_zoom[0]);
809 exit(1);
810 }
811
812 // Prepare the images for decoding
813 if (!CommonFlags::CollectImages(FLAGS_images, &fImages)) {
814 exit(1);
815 }
816
817 // Choose the candidate color types for image decoding
818 fColorTypes.push_back(kN32_SkColorType);
819 if (!FLAGS_simpleCodec) {
820 fColorTypes.push_back(kRGB_565_SkColorType);
821 fColorTypes.push_back(kAlpha_8_SkColorType);
822 fColorTypes.push_back(kGray_8_SkColorType);
823 }
824 }
825
826 static sk_sp<SkPicture> ReadPicture(const char* path) {
827 // Not strictly necessary, as it will be checked again later,
828 // but helps to avoid a lot of pointless work if we're going to skip it.
829 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
830 return nullptr;
831 }
832
833 std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(path);
834 if (!stream) {
835 SkDebugf("Could not read %s.\n", path);
836 return nullptr;
837 }
838
839 return SkPicture::MakeFromStream(stream.get());
840 }
841
842 static std::unique_ptr<MSKPPlayer> ReadMSKP(const char* path) {
843 // Not strictly necessary, as it will be checked again later,
844 // but helps to avoid a lot of pointless work if we're going to skip it.
845 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
846 return nullptr;
847 }
848
849 std::unique_ptr<SkStreamSeekable> stream = SkStream::MakeFromFile(path);
850 if (!stream) {
851 SkDebugf("Could not read %s.\n", path);
852 return nullptr;
853 }
854
855 return MSKPPlayer::Make(stream.get());
856 }
857
858 static sk_sp<SkPicture> ReadSVGPicture(const char* path) {
859 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
860 return nullptr;
861 }
862 sk_sp<SkData> data(SkData::MakeFromFileName(path));
863 if (!data) {
864 SkDebugf("Could not read %s.\n", path);
865 return nullptr;
866 }
867
868 #if defined(SK_ENABLE_SVG)
869 SkMemoryStream stream(std::move(data));
870 sk_sp<SkSVGDOM> svgDom = SkSVGDOM::MakeFromStream(stream);
871 if (!svgDom) {
872 SkDebugf("Could not parse %s.\n", path);
873 return nullptr;
874 }
875
876 // Use the intrinsic SVG size if available, otherwise fall back to a default value.
877 static const SkSize kDefaultContainerSize = SkSize::Make(128, 128);
878 if (svgDom->containerSize().isEmpty()) {
879 svgDom->setContainerSize(kDefaultContainerSize);
880 }
881
882 SkPictureRecorder recorder;
883 svgDom->render(recorder.beginRecording(svgDom->containerSize().width(),
884 svgDom->containerSize().height()));
885 return recorder.finishRecordingAsPicture();
886 #else
887 return nullptr;
888 #endif // defined(SK_ENABLE_SVG)
889 }
890
891 Benchmark* next() {
892 std::unique_ptr<Benchmark> bench;
893 do {
894 bench.reset(this->rawNext());
895 if (!bench) {
896 return nullptr;
897 }
898 } while (CommandLineFlags::ShouldSkip(FLAGS_sourceType, fSourceType) ||
899 CommandLineFlags::ShouldSkip(FLAGS_benchType, fBenchType));
900 return bench.release();
901 }
902
903 Benchmark* rawNext() {
904 if (fBenches) {
905 Benchmark* bench = fBenches->get()(nullptr);
906 fBenches = fBenches->next();
907 fSourceType = "bench";
908 fBenchType = "micro";
909 return bench;
910 }
911
912 while (fGMs) {
913 std::unique_ptr<skiagm::GM> gm = fGMs->get()();
914 fGMs = fGMs->next();
915 if (gm->runAsBench()) {
916 fSourceType = "gm";
917 fBenchType = "micro";
918 return new GMBench(std::move(gm));
919 }
920 }
921
922 while (fCurrentTextBlobTrace < fTextBlobTraces.size()) {
923 SkString path = fTextBlobTraces[fCurrentTextBlobTrace++];
924 SkString basename = SkOSPath::Basename(path.c_str());
925 static constexpr char kEnding[] = ".trace";
926 if (basename.endsWith(kEnding)) {
927 basename.remove(basename.size() - strlen(kEnding), strlen(kEnding));
928 }
929 fSourceType = "texttrace";
930 fBenchType = "micro";
931 return CreateDiffCanvasBench(
932 SkStringPrintf("SkDiffBench-%s", basename.c_str()),
933 [path](){ return SkStream::MakeFromFile(path.c_str()); });
934 }
935
936 // First add all .skps as RecordingBenches.
937 while (fCurrentRecording < fSKPs.size()) {
938 const SkString& path = fSKPs[fCurrentRecording++];
939 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
940 if (!pic) {
941 continue;
942 }
943 SkString name = SkOSPath::Basename(path.c_str());
944 fSourceType = "skp";
945 fBenchType = "recording";
946 fSKPBytes = static_cast<double>(pic->approximateBytesUsed());
947 fSKPOps = pic->approximateOpCount();
948 return new RecordingBench(name.c_str(), pic.get(), FLAGS_bbh);
949 }
950
951 // Add all .skps as DeserializePictureBenchs.
952 while (fCurrentDeserialPicture < fSKPs.size()) {
953 const SkString& path = fSKPs[fCurrentDeserialPicture++];
954 sk_sp<SkData> data = SkData::MakeFromFileName(path.c_str());
955 if (!data) {
956 continue;
957 }
958 SkString name = SkOSPath::Basename(path.c_str());
959 fSourceType = "skp";
960 fBenchType = "deserial";
961 fSKPBytes = static_cast<double>(data->size());
962 fSKPOps = 0;
963 return new DeserializePictureBench(name.c_str(), std::move(data));
964 }
965
966 // Then once each for each scale as SKPBenches (playback).
967 while (fCurrentScale < fScales.size()) {
968 while (fCurrentSKP < fSKPs.size()) {
969 const SkString& path = fSKPs[fCurrentSKP++];
970 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
971 if (!pic) {
972 continue;
973 }
974
975 if (FLAGS_bbh) {
976 // The SKP we read off disk doesn't have a BBH. Re-record so it grows one.
977 SkRTreeFactory factory;
978 SkPictureRecorder recorder;
979 pic->playback(recorder.beginRecording(pic->cullRect().width(),
980 pic->cullRect().height(),
981 &factory));
982 pic = recorder.finishRecordingAsPicture();
983 }
984 SkString name = SkOSPath::Basename(path.c_str());
985 fSourceType = "skp";
986 fBenchType = "playback";
987 return new SKPBench(name.c_str(), pic.get(), fClip, fScales[fCurrentScale],
988 FLAGS_loopSKP);
989 }
990
991 while (fCurrentSVG < fSVGs.size()) {
992 const char* path = fSVGs[fCurrentSVG++].c_str();
993 if (sk_sp<SkPicture> pic = ReadSVGPicture(path)) {
994 fSourceType = "svg";
995 fBenchType = "playback";
996 return new SKPBench(SkOSPath::Basename(path).c_str(), pic.get(), fClip,
997 fScales[fCurrentScale], FLAGS_loopSKP);
998 }
999 }
1000
1001 fCurrentSKP = 0;
1002 fCurrentSVG = 0;
1003 fCurrentScale++;
1004 }
1005
1006 // Now loop over each skp again if we have an animation
1007 if (fZoomMax != 1.0f && fZoomPeriodMs > 0) {
1008 while (fCurrentAnimSKP < fSKPs.size()) {
1009 const SkString& path = fSKPs[fCurrentAnimSKP];
1010 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
1011 if (!pic) {
1012 fCurrentAnimSKP++;
1013 continue;
1014 }
1015
1016 fCurrentAnimSKP++;
1017 SkString name = SkOSPath::Basename(path.c_str());
1018 sk_sp<SKPAnimationBench::Animation> animation =
1019 SKPAnimationBench::MakeZoomAnimation(fZoomMax, fZoomPeriodMs);
1020 return new SKPAnimationBench(name.c_str(), pic.get(), fClip, std::move(animation),
1021 FLAGS_loopSKP);
1022 }
1023 }
1024
1025 // Read all MSKPs as benches
1026 while (fCurrentMSKP < fMSKPs.size()) {
1027 const SkString& path = fMSKPs[fCurrentMSKP++];
1028 std::unique_ptr<MSKPPlayer> player = ReadMSKP(path.c_str());
1029 if (!player) {
1030 continue;
1031 }
1032 SkString name = SkOSPath::Basename(path.c_str());
1033 fSourceType = "mskp";
1034 fBenchType = "mskp";
1035 return new MSKPBench(std::move(name), std::move(player));
1036 }
1037
1038 for (; fCurrentCodec < fImages.size(); fCurrentCodec++) {
1039 fSourceType = "image";
1040 fBenchType = "skcodec";
1041 const SkString& path = fImages[fCurrentCodec];
1042 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1043 continue;
1044 }
1045 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1046 std::unique_ptr<SkCodec> codec(SkCodec::MakeFromData(encoded));
1047 if (!codec) {
1048 // Nothing to time.
1049 SkDebugf("Cannot find codec for %s\n", path.c_str());
1050 continue;
1051 }
1052
1053 while (fCurrentColorType < fColorTypes.size()) {
1054 const SkColorType colorType = fColorTypes[fCurrentColorType];
1055
1056 SkAlphaType alphaType = codec->getInfo().alphaType();
1057 if (FLAGS_simpleCodec) {
1058 if (kUnpremul_SkAlphaType == alphaType) {
1059 alphaType = kPremul_SkAlphaType;
1060 }
1061
1062 fCurrentColorType++;
1063 } else {
1064 switch (alphaType) {
1065 case kOpaque_SkAlphaType:
1066 // We only need to test one alpha type (opaque).
1067 fCurrentColorType++;
1068 break;
1069 case kUnpremul_SkAlphaType:
1070 case kPremul_SkAlphaType:
1071 if (0 == fCurrentAlphaType) {
1072 // Test unpremul first.
1073 alphaType = kUnpremul_SkAlphaType;
1074 fCurrentAlphaType++;
1075 } else {
1076 // Test premul.
1077 alphaType = kPremul_SkAlphaType;
1078 fCurrentAlphaType = 0;
1079 fCurrentColorType++;
1080 }
1081 break;
1082 default:
1083 SkASSERT(false);
1084 fCurrentColorType++;
1085 break;
1086 }
1087 }
1088
1089 // Make sure we can decode to this color type and alpha type.
1090 SkImageInfo info =
1091 codec->getInfo().makeColorType(colorType).makeAlphaType(alphaType);
1092 const size_t rowBytes = info.minRowBytes();
1093 SkAutoMalloc storage(info.computeByteSize(rowBytes));
1094
1095 const SkCodec::Result result = codec->getPixels(
1096 info, storage.get(), rowBytes);
1097 switch (result) {
1098 case SkCodec::kSuccess:
1099 case SkCodec::kIncompleteInput:
1100 return new CodecBench(SkOSPath::Basename(path.c_str()),
1101 encoded.get(), colorType, alphaType);
1102 case SkCodec::kInvalidConversion:
1103 // This is okay. Not all conversions are valid.
1104 break;
1105 default:
1106 // This represents some sort of failure.
1107 SkASSERT(false);
1108 break;
1109 }
1110 }
1111 fCurrentColorType = 0;
1112 }
1113
1114 // Run AndroidCodecBenches
1115 const int sampleSizes[] = { 2, 4, 8 };
1116 for (; fCurrentAndroidCodec < fImages.size(); fCurrentAndroidCodec++) {
1117 fSourceType = "image";
1118 fBenchType = "skandroidcodec";
1119
1120 const SkString& path = fImages[fCurrentAndroidCodec];
1121 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1122 continue;
1123 }
1124 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1125 std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::MakeFromData(encoded));
1126 if (!codec) {
1127 // Nothing to time.
1128 SkDebugf("Cannot find codec for %s\n", path.c_str());
1129 continue;
1130 }
1131
1132 while (fCurrentSampleSize < (int) std::size(sampleSizes)) {
1133 int sampleSize = sampleSizes[fCurrentSampleSize];
1134 fCurrentSampleSize++;
1135 if (10 * sampleSize > std::min(codec->getInfo().width(), codec->getInfo().height())) {
1136 // Avoid benchmarking scaled decodes of already small images.
1137 break;
1138 }
1139
1140 return new AndroidCodecBench(SkOSPath::Basename(path.c_str()),
1141 encoded.get(), sampleSize);
1142 }
1143 fCurrentSampleSize = 0;
1144 }
1145
1146 #ifdef SK_ENABLE_ANDROID_UTILS
1147 // Run the BRDBenches
1148 // We intend to create benchmarks that model the use cases in
1149 // android/libraries/social/tiledimage. In this library, an image is decoded in 512x512
1150 // tiles. The image can be translated freely, so the location of a tile may be anywhere in
1151 // the image. For that reason, we will benchmark decodes in five representative locations
1152 // in the image. Additionally, this use case utilizes power of two scaling, so we will
1153 // test on power of two sample sizes. The output tile is always 512x512, so, when a
1154 // sampleSize is used, the size of the subset that is decoded is always
1155 // (sampleSize*512)x(sampleSize*512).
1156 // There are a few good reasons to only test on power of two sample sizes at this time:
1157 // All use cases we are aware of only scale by powers of two.
1158 // PNG decodes use the indicated sampling strategy regardless of the sample size, so
1159 // these tests are sufficient to provide good coverage of our scaling options.
1160 const uint32_t brdSampleSizes[] = { 1, 2, 4, 8, 16 };
1161 const uint32_t minOutputSize = 512;
1162 for (; fCurrentBRDImage < fImages.size(); fCurrentBRDImage++) {
1163 fSourceType = "image";
1164 fBenchType = "BRD";
1165
1166 const SkString& path = fImages[fCurrentBRDImage];
1167 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1168 continue;
1169 }
1170
1171 while (fCurrentColorType < fColorTypes.size()) {
1172 while (fCurrentSampleSize < (int) std::size(brdSampleSizes)) {
1173 while (fCurrentSubsetType <= kLastSingle_SubsetType) {
1174
1175 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1176 const SkColorType colorType = fColorTypes[fCurrentColorType];
1177 uint32_t sampleSize = brdSampleSizes[fCurrentSampleSize];
1178 int currentSubsetType = fCurrentSubsetType++;
1179
1180 int width = 0;
1181 int height = 0;
1182 if (!valid_brd_bench(encoded, colorType, sampleSize, minOutputSize,
1183 &width, &height)) {
1184 break;
1185 }
1186
1187 SkString basename = SkOSPath::Basename(path.c_str());
1188 SkIRect subset;
1189 const uint32_t subsetSize = sampleSize * minOutputSize;
1190 switch (currentSubsetType) {
1191 case kTopLeft_SubsetType:
1192 basename.append("_TopLeft");
1193 subset = SkIRect::MakeXYWH(0, 0, subsetSize, subsetSize);
1194 break;
1195 case kTopRight_SubsetType:
1196 basename.append("_TopRight");
1197 subset = SkIRect::MakeXYWH(width - subsetSize, 0, subsetSize,
1198 subsetSize);
1199 break;
1200 case kMiddle_SubsetType:
1201 basename.append("_Middle");
1202 subset = SkIRect::MakeXYWH((width - subsetSize) / 2,
1203 (height - subsetSize) / 2, subsetSize, subsetSize);
1204 break;
1205 case kBottomLeft_SubsetType:
1206 basename.append("_BottomLeft");
1207 subset = SkIRect::MakeXYWH(0, height - subsetSize, subsetSize,
1208 subsetSize);
1209 break;
1210 case kBottomRight_SubsetType:
1211 basename.append("_BottomRight");
1212 subset = SkIRect::MakeXYWH(width - subsetSize,
1213 height - subsetSize, subsetSize, subsetSize);
1214 break;
1215 default:
1216 SkASSERT(false);
1217 }
1218
1219 return new BitmapRegionDecoderBench(basename.c_str(), encoded.get(),
1220 colorType, sampleSize, subset);
1221 }
1222 fCurrentSubsetType = 0;
1223 fCurrentSampleSize++;
1224 }
1225 fCurrentSampleSize = 0;
1226 fCurrentColorType++;
1227 }
1228 fCurrentColorType = 0;
1229 }
1230 #endif // SK_ENABLE_ANDROID_UTILS
1231
1232 return nullptr;
1233 }
1234
1235 void fillCurrentOptions(NanoJSONResultsWriter& log) const {
1236 log.appendCString("source_type", fSourceType);
1237 log.appendCString("bench_type", fBenchType);
1238 if (0 == strcmp(fSourceType, "skp")) {
1239 log.appendString("clip",
1240 SkStringPrintf("%d %d %d %d", fClip.fLeft, fClip.fTop,
1241 fClip.fRight, fClip.fBottom));
1242 SkASSERT_RELEASE(fCurrentScale < fScales.size()); // debugging paranoia
1243 log.appendString("scale", SkStringPrintf("%.2g", fScales[fCurrentScale]));
1244 }
1245 }
1246
1247 void fillCurrentMetrics(NanoJSONResultsWriter& log) const {
1248 if (0 == strcmp(fBenchType, "recording")) {
1249 log.appendMetric("bytes", fSKPBytes);
1250 log.appendMetric("ops", fSKPOps);
1251 }
1252 }
1253
1254 private:
1255 #ifdef SK_ENABLE_ANDROID_UTILS
1256 enum SubsetType {
1257 kTopLeft_SubsetType = 0,
1258 kTopRight_SubsetType = 1,
1259 kMiddle_SubsetType = 2,
1260 kBottomLeft_SubsetType = 3,
1261 kBottomRight_SubsetType = 4,
1262 kTranslate_SubsetType = 5,
1263 kZoom_SubsetType = 6,
1264 kLast_SubsetType = kZoom_SubsetType,
1265 kLastSingle_SubsetType = kBottomRight_SubsetType,
1266 };
1267 #endif
1268
1269 const BenchRegistry* fBenches;
1270 const skiagm::GMRegistry* fGMs;
1271 SkIRect fClip;
1272 SkTArray<SkScalar> fScales;
1273 SkTArray<SkString> fSKPs;
1274 SkTArray<SkString> fMSKPs;
1275 SkTArray<SkString> fSVGs;
1276 SkTArray<SkString> fTextBlobTraces;
1277 SkTArray<SkString> fImages;
1278 SkTArray<SkColorType, true> fColorTypes;
1279 SkScalar fZoomMax;
1280 double fZoomPeriodMs;
1281
1282 double fSKPBytes, fSKPOps;
1283
1284 const char* fSourceType; // What we're benching: bench, GM, SKP, ...
1285 const char* fBenchType; // How we bench it: micro, recording, playback, ...
1286 int fCurrentRecording = 0;
1287 int fCurrentDeserialPicture = 0;
1288 int fCurrentMSKP = 0;
1289 int fCurrentScale = 0;
1290 int fCurrentSKP = 0;
1291 int fCurrentSVG = 0;
1292 int fCurrentTextBlobTrace = 0;
1293 int fCurrentCodec = 0;
1294 int fCurrentAndroidCodec = 0;
1295 #ifdef SK_ENABLE_ANDROID_UTILS
1296 int fCurrentBRDImage = 0;
1297 int fCurrentSubsetType = 0;
1298 #endif
1299 int fCurrentColorType = 0;
1300 int fCurrentAlphaType = 0;
1301 int fCurrentSampleSize = 0;
1302 int fCurrentAnimSKP = 0;
1303 };
1304
1305 // Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
1306 // This prints something every once in a while so that it knows we're still working.
1307 static void start_keepalive() {
1308 static std::thread* intentionallyLeaked = new std::thread([]{
1309 for (;;) {
1310 static const int kSec = 1200;
1311 #if defined(SK_BUILD_FOR_WIN)
1312 Sleep(kSec * 1000);
1313 #else
1314 sleep(kSec);
1315 #endif
1316 SkDebugf("\nBenchmarks still running...\n");
1317 }
1318 });
1319 (void)intentionallyLeaked;
1320 SK_INTENTIONALLY_LEAKED(intentionallyLeaked);
1321 }
1322
1323 class NanobenchShaderErrorHandler : public GrContextOptions::ShaderErrorHandler {
1324 void compileError(const char* shader, const char* errors) override {
1325 // Nanobench should abort if any shader can't compile. Failure is much better than
1326 // reporting meaningless performance metrics.
1327 std::string message = SkShaderUtils::BuildShaderErrorMessage(shader, errors);
1328 SK_ABORT("\n%s", message.c_str());
1329 }
1330 };
1331
1332 int main(int argc, char** argv) {
1333 CommandLineFlags::Parse(argc, argv);
1334
1335 initializeEventTracingForTools();
1336
1337 #if defined(SK_BUILD_FOR_IOS)
1338 cd_Documents();
1339 #endif
1340 SetupCrashHandler();
1341 SkAutoGraphics ag;
1342 SkTaskGroup::Enabler enabled(FLAGS_threads);
1343
1344 CommonFlags::SetCtxOptions(&grContextOpts);
1345
1346 NanobenchShaderErrorHandler errorHandler;
1347 grContextOpts.fShaderErrorHandler = &errorHandler;
1348
1349 if (kAutoTuneLoops != FLAGS_loops) {
1350 FLAGS_samples = 1;
1351 FLAGS_gpuFrameLag = 0;
1352 }
1353
1354 if (!FLAGS_writePath.isEmpty()) {
1355 SkDebugf("Writing files to %s.\n", FLAGS_writePath[0]);
1356 if (!sk_mkdir(FLAGS_writePath[0])) {
1357 SkDebugf("Could not create %s. Files won't be written.\n", FLAGS_writePath[0]);
1358 FLAGS_writePath.set(0, nullptr);
1359 }
1360 }
1361
1362 std::unique_ptr<SkWStream> logStream(new SkNullWStream);
1363 if (!FLAGS_outResultsFile.isEmpty()) {
1364 #if defined(SK_RELEASE)
1365 logStream.reset(new SkFILEWStream(FLAGS_outResultsFile[0]));
1366 #else
1367 SkDebugf("I'm ignoring --outResultsFile because this is a Debug build.");
1368 return 1;
1369 #endif
1370 }
1371 NanoJSONResultsWriter log(logStream.get(), SkJSONWriter::Mode::kPretty);
1372 log.beginObject(); // root
1373
1374 if (1 == FLAGS_properties.size() % 2) {
1375 SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
1376 return 1;
1377 }
1378 for (int i = 1; i < FLAGS_properties.size(); i += 2) {
1379 log.appendCString(FLAGS_properties[i-1], FLAGS_properties[i]);
1380 }
1381
1382 if (1 == FLAGS_key.size() % 2) {
1383 SkDebugf("ERROR: --key must be passed with an even number of arguments.\n");
1384 return 1;
1385 }
1386 if (FLAGS_key.size()) {
1387 log.beginObject("key");
1388 for (int i = 1; i < FLAGS_key.size(); i += 2) {
1389 log.appendCString(FLAGS_key[i - 1], FLAGS_key[i]);
1390 }
1391 log.endObject(); // key
1392 }
1393
1394 const double overhead = estimate_timer_overhead();
1395 if (!FLAGS_quiet && !FLAGS_csv) {
1396 SkDebugf("Timer overhead: %s\n", HUMANIZE(overhead));
1397 }
1398
1399 SkTArray<double> samples;
1400
1401 if (kAutoTuneLoops != FLAGS_loops) {
1402 SkDebugf("Fixed number of loops; times would only be misleading so we won't print them.\n");
1403 } else if (FLAGS_quiet) {
1404 SkDebugf("! -> high variance, ? -> moderate variance\n");
1405 SkDebugf(" micros \tbench\n");
1406 } else if (FLAGS_csv) {
1407 SkDebugf("min,median,mean,max,stddev,config,bench\n");
1408 } else if (FLAGS_ms) {
1409 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\tsamples\tconfig\tbench\n");
1410 } else {
1411 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\t%-*s\tconfig\tbench\n",
1412 FLAGS_samples, "samples");
1413 }
1414
1415 GrRecordingContextPriv::DMSAAStats combinedDMSAAStats;
1416
1417 SkTArray<Config> configs;
1418 create_configs(&configs);
1419
1420 if (FLAGS_keepAlive) {
1421 start_keepalive();
1422 }
1423
1424 CommonFlags::SetAnalyticAA();
1425
1426 gSkForceRasterPipelineBlitter = FLAGS_forceRasterPipelineHP || FLAGS_forceRasterPipeline;
1427 gForceHighPrecisionRasterPipeline = FLAGS_forceRasterPipelineHP;
1428 gUseSkVMBlitter = FLAGS_skvm;
1429 gSkVMAllowJIT = FLAGS_jit;
1430 gSkVMJITViaDylib = FLAGS_dylib;
1431
1432 // The SkSL memory benchmark must run before any GPU painting occurs. SkSL allocates memory for
1433 // its modules the first time they are accessed, and this test is trying to measure the size of
1434 // those allocations. If a paint has already occurred, some modules will have already been
1435 // loaded, so we won't be able to capture a delta for them.
1436 log.beginObject("results");
1437 RunSkSLModuleBenchmarks(&log);
1438
1439 int runs = 0;
1440 BenchmarkStream benchStream;
1441 AutoreleasePool pool;
1442 while (Benchmark* b = benchStream.next()) {
1443 std::unique_ptr<Benchmark> bench(b);
1444 if (CommandLineFlags::ShouldSkip(FLAGS_match, bench->getUniqueName())) {
1445 continue;
1446 }
1447
1448 if (!configs.empty()) {
1449 log.beginBench(bench->getUniqueName(), bench->getSize().fX, bench->getSize().fY);
1450 bench->delayedSetup();
1451 }
1452 for (int i = 0; i < configs.size(); ++i) {
1453 Target* target = is_enabled(b, configs[i]);
1454 if (!target) {
1455 continue;
1456 }
1457
1458 // During HWUI output this canvas may be nullptr.
1459 SkCanvas* canvas = target->getCanvas();
1460 const char* config = target->config.name.c_str();
1461
1462 if (FLAGS_pre_log || FLAGS_dryRun) {
1463 SkDebugf("Running %s\t%s\n"
1464 , bench->getUniqueName()
1465 , config);
1466 if (FLAGS_dryRun) {
1467 continue;
1468 }
1469 }
1470
1471 if (FLAGS_purgeBetweenBenches) {
1472 SkGraphics::PurgeAllCaches();
1473 }
1474
1475 if (FLAGS_splitPerfettoTracesByBenchmark) {
1476 TRACE_EVENT_API_NEW_TRACE_SECTION(TRACE_STR_COPY(bench->getUniqueName()));
1477 }
1478 TRACE_EVENT2("skia", "Benchmark", "name", TRACE_STR_COPY(bench->getUniqueName()),
1479 "config", TRACE_STR_COPY(config));
1480
1481 target->setup();
1482 bench->perCanvasPreDraw(canvas);
1483
1484 int maxFrameLag;
1485 int loops = target->needsFrameTiming(&maxFrameLag)
1486 ? setup_gpu_bench(target, bench.get(), maxFrameLag)
1487 : setup_cpu_bench(overhead, target, bench.get());
1488
1489 if (kFailedLoops == loops) {
1490 // Can't be timed. A warning note has already been printed.
1491 cleanup_run(target);
1492 continue;
1493 }
1494
1495 if (runs == 0 && FLAGS_ms < 1000) {
1496 // Run the first bench for 1000ms to warm up the nanobench if FLAGS_ms < 1000.
1497 // Otherwise, the first few benches' measurements will be inaccurate.
1498 auto stop = now_ms() + 1000;
1499 do {
1500 time(loops, bench.get(), target);
1501 pool.drain();
1502 } while (now_ms() < stop);
1503 }
1504
1505 if (FLAGS_ms) {
1506 samples.clear();
1507 auto stop = now_ms() + FLAGS_ms;
1508 do {
1509 samples.push_back(time(loops, bench.get(), target) / loops);
1510 pool.drain();
1511 } while (now_ms() < stop);
1512 } else {
1513 samples.reset(FLAGS_samples);
1514 for (int s = 0; s < FLAGS_samples; s++) {
1515 samples[s] = time(loops, bench.get(), target) / loops;
1516 pool.drain();
1517 }
1518 }
1519
1520 // Scale each result to the benchmark's own units, time/unit.
1521 for (double& sample : samples) {
1522 sample *= (1.0 / bench->getUnits());
1523 }
1524
1525 SkTArray<SkString> keys;
1526 SkTArray<double> values;
1527 if (configs[i].backend == Benchmark::kGPU_Backend) {
1528 if (FLAGS_gpuStatsDump) {
1529 // TODO cache stats
1530 bench->getGpuStats(canvas, &keys, &values);
1531 }
1532 if (FLAGS_dmsaaStatsDump && bench->getDMSAAStats(canvas->recordingContext())) {
1533 const auto& dmsaaStats = canvas->recordingContext()->priv().dmsaaStats();
1534 dmsaaStats.dumpKeyValuePairs(&keys, &values);
1535 dmsaaStats.dump();
1536 combinedDMSAAStats.merge(dmsaaStats);
1537 }
1538 }
1539
1540 bench->perCanvasPostDraw(canvas);
1541
1542 if (Benchmark::kNonRendering_Backend != target->config.backend &&
1543 !FLAGS_writePath.isEmpty() && FLAGS_writePath[0]) {
1544 SkString pngFilename = SkOSPath::Join(FLAGS_writePath[0], config);
1545 pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getUniqueName());
1546 pngFilename.append(".png");
1547 write_canvas_png(target, pngFilename);
1548 }
1549
1550 // Building stats.plot often shows up in profiles,
1551 // so skip building it when we're not going to print it anyway.
1552 const bool want_plot = !FLAGS_quiet && !FLAGS_ms;
1553
1554 Stats stats(samples, want_plot);
1555 log.beginObject(config);
1556
1557 log.beginObject("options");
1558 log.appendCString("name", bench->getName());
1559 benchStream.fillCurrentOptions(log);
1560 log.endObject(); // options
1561
1562 // Metrics
1563 log.appendMetric("min_ms", stats.min);
1564 log.appendMetric("min_ratio", sk_ieee_double_divide(stats.median, stats.min));
1565 log.beginArray("samples");
1566 for (double sample : samples) {
1567 log.appendDoubleDigits(sample, 16);
1568 }
1569 log.endArray(); // samples
1570 benchStream.fillCurrentMetrics(log);
1571 if (!keys.empty()) {
1572 // dump to json, only SKPBench currently returns valid keys / values
1573 SkASSERT(keys.size() == values.size());
1574 for (int j = 0; j < keys.size(); j++) {
1575 log.appendMetric(keys[j].c_str(), values[j]);
1576 }
1577 }
1578
1579 log.endObject(); // config
1580
1581 if (runs++ % FLAGS_flushEvery == 0) {
1582 log.flush();
1583 }
1584
1585 if (kAutoTuneLoops != FLAGS_loops) {
1586 if (configs.size() == 1) {
1587 config = ""; // Only print the config if we run the same bench on more than one.
1588 }
1589 SkDebugf("%4d/%-4dMB\t%s\t%s "
1590 , sk_tools::getCurrResidentSetSizeMB()
1591 , sk_tools::getMaxResidentSetSizeMB()
1592 , bench->getUniqueName()
1593 , config);
1594 SK_BLITTER_TRACE_PRINT;
1595 SkDebugf("\n");
1596 } else if (FLAGS_quiet) {
1597 const char* mark = " ";
1598 const double stddev_percent =
1599 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1600 if (stddev_percent > 5) mark = "?";
1601 if (stddev_percent > 10) mark = "!";
1602
1603 SkDebugf("%10.2f %s\t%s\t%s\n",
1604 stats.median*1e3, mark, bench->getUniqueName(), config);
1605 } else if (FLAGS_csv) {
1606 const double stddev_percent =
1607 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1608 SkDebugf("%g,%g,%g,%g,%g,%s,%s\n"
1609 , stats.min
1610 , stats.median
1611 , stats.mean
1612 , stats.max
1613 , stddev_percent
1614 , config
1615 , bench->getUniqueName()
1616 );
1617 } else {
1618 const double stddev_percent =
1619 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1620 SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n"
1621 , sk_tools::getCurrResidentSetSizeMB()
1622 , sk_tools::getMaxResidentSetSizeMB()
1623 , loops
1624 , HUMANIZE(stats.min)
1625 , HUMANIZE(stats.median)
1626 , HUMANIZE(stats.mean)
1627 , HUMANIZE(stats.max)
1628 , stddev_percent
1629 , FLAGS_ms ? to_string(samples.size()).c_str() : stats.plot.c_str()
1630 , config
1631 , bench->getUniqueName()
1632 );
1633 }
1634
1635 if (FLAGS_gpuStats && Benchmark::kGPU_Backend == configs[i].backend) {
1636 target->dumpStats();
1637 }
1638
1639 if (FLAGS_verbose) {
1640 SkDebugf("Samples: ");
1641 for (int j = 0; j < samples.size(); j++) {
1642 SkDebugf("%s ", HUMANIZE(samples[j]));
1643 }
1644 SkDebugf("%s\n", bench->getUniqueName());
1645 }
1646 cleanup_run(target);
1647 pool.drain();
1648 }
1649 if (!configs.empty()) {
1650 log.endBench();
1651 }
1652 }
1653
1654 if (FLAGS_dmsaaStatsDump) {
1655 SkDebugf("<<Total Combined DMSAA Stats>>\n");
1656 combinedDMSAAStats.dump();
1657 }
1658
1659 SkGraphics::PurgeAllCaches();
1660
1661 log.beginBench("memory_usage", 0, 0);
1662 log.beginObject("meta"); // config
1663 log.appendS32("max_rss_mb", sk_tools::getMaxResidentSetSizeMB());
1664 log.endObject(); // config
1665 log.endBench();
1666
1667 log.endObject(); // results
1668 log.endObject(); // root
1669 log.flush();
1670
1671 return 0;
1672 }
1673