1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <ctype.h>
9
10 #include "bench/nanobench.h"
11
12 #include "bench/AndroidCodecBench.h"
13 #include "bench/Benchmark.h"
14 #include "bench/CodecBench.h"
15 #include "bench/CodecBenchPriv.h"
16 #include "bench/GMBench.h"
17 #include "bench/MSKPBench.h"
18 #include "bench/RecordingBench.h"
19 #include "bench/ResultsWriter.h"
20 #include "bench/SKPAnimationBench.h"
21 #include "bench/SKPBench.h"
22 #include "bench/SkGlyphCacheBench.h"
23 #include "bench/SkSLBench.h"
24 #include "include/codec/SkAndroidCodec.h"
25 #include "include/codec/SkCodec.h"
26 #include "include/codec/SkJpegDecoder.h"
27 #include "include/codec/SkPngDecoder.h"
28 #include "include/core/SkBBHFactory.h"
29 #include "include/core/SkCanvas.h"
30 #include "include/core/SkData.h"
31 #include "include/core/SkGraphics.h"
32 #include "include/core/SkPictureRecorder.h"
33 #include "include/core/SkString.h"
34 #include "include/core/SkSurface.h"
35 #include "include/encode/SkPngEncoder.h"
36 #include "include/private/base/SkMacros.h"
37 #include "src/base/SkAutoMalloc.h"
38 #include "src/base/SkLeanWindows.h"
39 #include "src/base/SkTime.h"
40 #include "src/core/SkColorSpacePriv.h"
41 #include "src/core/SkOSFile.h"
42 #include "src/core/SkTaskGroup.h"
43 #include "src/core/SkTraceEvent.h"
44 #include "src/utils/SkJSONWriter.h"
45 #include "src/utils/SkOSPath.h"
46 #include "src/utils/SkShaderUtils.h"
47 #include "tools/AutoreleasePool.h"
48 #include "tools/CrashHandler.h"
49 #include "tools/MSKPPlayer.h"
50 #include "tools/ProcStats.h"
51 #include "tools/Stats.h"
52 #include "tools/ToolUtils.h"
53 #include "tools/flags/CommonFlags.h"
54 #include "tools/flags/CommonFlagsConfig.h"
55 #include "tools/fonts/FontToolUtils.h"
56 #include "tools/ios_utils.h"
57 #include "tools/trace/EventTracingPriv.h"
58 #include "tools/trace/SkDebugfTracer.h"
59
60 #if defined(SK_ENABLE_SVG)
61 #include "modules/skshaper/utils/FactoryHelpers.h"
62 #include "modules/svg/include/SkSVGDOM.h"
63 #include "modules/svg/include/SkSVGNode.h"
64 #endif
65
66 #ifdef SK_ENABLE_ANDROID_UTILS
67 #include "bench/BitmapRegionDecoderBench.h"
68 #include "client_utils/android/BitmapRegionDecoder.h"
69 #endif
70
71 #if defined(SK_GRAPHITE)
72 #include "include/gpu/graphite/Context.h"
73 #include "include/gpu/graphite/Recorder.h"
74 #include "include/gpu/graphite/Recording.h"
75 #include "include/gpu/graphite/Surface.h"
76 #include "tools/GpuToolUtils.h"
77 #include "tools/graphite/ContextFactory.h"
78 #include "tools/graphite/GraphiteTestContext.h"
79 #endif
80
81 #include <cinttypes>
82 #include <memory>
83 #include <optional>
84 #include <stdlib.h>
85 #include <thread>
86
87 extern bool gSkForceRasterPipelineBlitter;
88 extern bool gForceHighPrecisionRasterPipeline;
89
90 #ifndef SK_BUILD_FOR_WIN
91 #include <unistd.h>
92 #endif
93
94 #include "include/gpu/GrDirectContext.h"
95 #include "include/gpu/ganesh/SkSurfaceGanesh.h"
96 #include "src/gpu/ganesh/GrCaps.h"
97 #include "src/gpu/ganesh/GrDirectContextPriv.h"
98 #include "src/gpu/ganesh/SkGr.h"
99 #include "tools/gpu/GrContextFactory.h"
100
101 using namespace skia_private;
102
103 using sk_gpu_test::ContextInfo;
104 using sk_gpu_test::GrContextFactory;
105 using sk_gpu_test::TestContext;
106
107 GrContextOptions grContextOpts;
108
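// A --loops value of 0 (kAutoTuneLoops) means "calibrate the loop count for each bench";
// see loops_help_txt() below.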
109 static const int kAutoTuneLoops = 0;
110
111 static SkString loops_help_txt() {
112 SkString help;
113 help.printf("Number of times to run each bench. Set this to %d to auto-"
114 "tune for each bench. Timings are only reported when auto-tuning.",
115 kAutoTuneLoops);
116 return help;
117 }
118
119 static SkString to_string(int n) {
120 SkString str;
121 str.appendS32(n);
122 return str;
123 }
124
125 static DEFINE_int(loops, kAutoTuneLoops, loops_help_txt().c_str());
126
127 static DEFINE_int(samples, 10, "Number of samples to measure for each bench.");
128 static DEFINE_int(ms, 0, "If >0, run each bench for this many ms instead of obeying --samples.");
129 static DEFINE_int(overheadLoops, 100000, "Loops to estimate timer overhead.");
130 static DEFINE_double(overheadGoal, 0.0001,
131                      "Loop until timer overhead is at most this fraction of our measurements.");
132 static DEFINE_double(gpuMs, 5, "Target bench time in milliseconds for GPU.");
133 static DEFINE_int(gpuFrameLag, 5,
134                   "Estimated maximum number of frames the GPU allows to lag; used when the actual value can't be queried.");
135
136 static DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
137 static DEFINE_int(maxCalibrationAttempts, 3,
138 "Try up to this many times to guess loops for a bench, or skip the bench.");
139 static DEFINE_int(maxLoops, 1000000, "Never run a bench more times than this.");
140 static DEFINE_string(clip, "0,0,1000,1000", "Clip for SKPs.");
141 static DEFINE_string(scales, "1.0", "Space-separated scales for SKPs.");
142 static DEFINE_string(zoom, "1.0,0",
143 "Comma-separated zoomMax,zoomPeriodMs factors for a periodic SKP zoom "
144 "function that ping-pongs between 1.0 and zoomMax.");
145 static DEFINE_bool(bbh, true, "Build a BBH for SKPs?");
146 static DEFINE_bool(loopSKP, true, "Loop SKPs like we do for micro benches?");
147 static DEFINE_int(flushEvery, 10, "Flush --outResultsFile every Nth run.");
148 static DEFINE_bool(gpuStats, false, "Print GPU stats after each gpu benchmark?");
149 static DEFINE_bool(gpuStatsDump, false, "Dump GPU stats after each benchmark to json");
150 static DEFINE_bool(dmsaaStatsDump, false, "Dump DMSAA stats after each benchmark to json");
151 static DEFINE_bool(keepAlive, false, "Print a message every so often so that we don't time out");
152 static DEFINE_bool(csv, false, "Print status in CSV format");
153 static DEFINE_string(sourceType, "",
154 "Apply usual --match rules to source type: bench, gm, skp, image, etc.");
155 static DEFINE_string(benchType, "",
156 "Apply usual --match rules to bench type: micro, recording, "
157 "piping, playback, skcodec, etc.");
158
159 static DEFINE_bool(forceRasterPipeline, false, "sets gSkForceRasterPipelineBlitter");
160 static DEFINE_bool(forceRasterPipelineHP, false, "sets gSkForceRasterPipelineBlitter and gForceHighPrecisionRasterPipeline");
161
162 static DEFINE_bool2(pre_log, p, false,
163 "Log before running each test. May be incomprehensible when threading");
164
165 static DEFINE_bool(cpu, true, "Run CPU-bound work?");
166 static DEFINE_bool(gpu, true, "Run GPU-bound work?");
167 static DEFINE_bool(dryRun, false,
168 "just print the tests that would be run, without actually running them.");
169 static DEFINE_string(images, "",
170 "List of images and/or directories to decode. A directory with no images"
171 " is treated as a fatal error.");
172 static DEFINE_bool(simpleCodec, false,
173                    "Run a subset of the codec tests, always N32, Premul or Opaque");
174
175 static DEFINE_string2(match, m, nullptr,
176 "[~][^]substring[$] [...] of name to run.\n"
177 "Multiple matches may be separated by spaces.\n"
178 "~ causes a matching name to always be skipped\n"
179 "^ requires the start of the name to match\n"
180 "$ requires the end of the name to match\n"
181 "^ and $ requires an exact match\n"
182 "If a name does not match any list entry,\n"
183 "it is skipped unless some list entry starts with ~");
184
185 static DEFINE_bool2(quiet, q, false, "if true, don't print status updates.");
186 static DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver.");
187
188
189 static DEFINE_string(skps, "skps", "Directory to read skps from.");
190 static DEFINE_string(mskps, "mskps", "Directory to read mskps from.");
191 static DEFINE_string(svgs, "", "Directory to read SVGs from, or a single SVG file.");
192 static DEFINE_string(texttraces, "", "Directory to read TextBlobTrace files from.");
193
194 static DEFINE_int_2(threads, j, -1,
195 "Run threadsafe tests on a threadpool with this many extra threads, "
196 "defaulting to one extra thread per core.");
197
198 static DEFINE_string2(writePath, w, "", "If set, write bitmaps here as .pngs.");
199
200 static DEFINE_string(key, "",
201 "Space-separated key/value pairs to add to JSON identifying this builder.");
202 static DEFINE_string(properties, "",
203 "Space-separated key/value pairs to add to JSON identifying this run.");
204
205 static DEFINE_bool(purgeBetweenBenches, false,
206 "Call SkGraphics::PurgeAllCaches() between each benchmark?");
207
208 static DEFINE_bool(splitPerfettoTracesByBenchmark, true,
209 "Create separate perfetto trace files for each benchmark?\n"
210 "Will only take effect if perfetto tracing is enabled. See --trace.");
211
212 static DEFINE_bool(runtimeCPUDetection, true, "Detect CPU features at runtime to enable optimized code paths (calls SkGraphics::Init()).");
213
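// SkTime::GetNSecs() reports nanoseconds; scaling by 1e-6 converts to milliseconds.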
214 static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
215
216 static SkString humanize(double ms) {
217 if (FLAGS_verbose) return SkStringPrintf("%" PRIu64, (uint64_t)(ms*1e6));
218 return HumanizeMs(ms);
219 }
220 #define HUMANIZE(ms) humanize(ms).c_str()
221
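// The base Target draws to a CPU raster surface; the GPU-backed targets below override init()
// to create their own surfaces and contexts.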
222 bool Target::init(SkImageInfo info, Benchmark* bench) {
223 if (Benchmark::Backend::kRaster == config.backend) {
224 this->surface = SkSurfaces::Raster(info);
225 if (!this->surface) {
226 return false;
227 }
228 }
229 return true;
230 }
231 bool Target::capturePixels(SkBitmap* bmp) {
232 SkCanvas* canvas = this->getCanvas();
233 if (!canvas) {
234 return false;
235 }
236 bmp->allocPixels(canvas->imageInfo());
237 if (!canvas->readPixels(*bmp, 0, 0)) {
238 SkDebugf("Can't read canvas pixels.\n");
239 return false;
240 }
241 return true;
242 }
243
244 struct GPUTarget : public Target {
245     explicit GPUTarget(const Config& c) : Target(c) {}
246 ContextInfo contextInfo;
247 std::unique_ptr<GrContextFactory> factory;
248
249     ~GPUTarget() override {
250         // For Vulkan we need to release all our refs to the GrContext before destroying the
251         // Vulkan context, which happens at the end of this destructor. Thus we release the
252         // surface here, since it holds a ref to the GrContext.
253 surface.reset();
254 }
255
256     void onSetup() override {
257 this->contextInfo.testContext()->makeCurrent();
258 }
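    // endTiming() flushes and then blocks on a GPU sync object, so a timed interval includes GPU
    // execution rather than just command recording; submitWorkAndSyncCPU() is used outside the
    // timed region (e.g. after calibration) to drain any queued work.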
259     void endTiming() override {
260 if (this->contextInfo.testContext()) {
261 this->contextInfo.testContext()->flushAndWaitOnSync(contextInfo.directContext());
262 }
263 }
264     void submitWorkAndSyncCPU() override {
265 if (this->contextInfo.testContext()) {
266 this->contextInfo.testContext()->flushAndSyncCpu(contextInfo.directContext());
267 }
268 }
269
270     bool needsFrameTiming(int* maxFrameLag) const override {
271 if (!this->contextInfo.testContext()->getMaxGpuFrameLag(maxFrameLag)) {
272 // Frame lag is unknown.
273 *maxFrameLag = FLAGS_gpuFrameLag;
274 }
275 return true;
276 }
277     bool init(SkImageInfo info, Benchmark* bench) override {
278 GrContextOptions options = grContextOpts;
279 bench->modifyGrContextOptions(&options);
280 this->factory = std::make_unique<GrContextFactory>(options);
281 SkSurfaceProps props(this->config.surfaceFlags, kRGB_H_SkPixelGeometry);
282 this->surface = SkSurfaces::RenderTarget(
283 this->factory->get(this->config.ctxType, this->config.ctxOverrides),
284 skgpu::Budgeted::kNo,
285 info,
286 this->config.samples,
287 &props);
288 this->contextInfo =
289 this->factory->getContextInfo(this->config.ctxType, this->config.ctxOverrides);
290 if (!this->surface) {
291 return false;
292 }
293 if (!this->contextInfo.testContext()->fenceSyncSupport()) {
294 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
295 "Timings might not be accurate.\n", this->config.name.c_str());
296 }
297 return true;
298 }
299
300     void dumpStats() override {
301 auto context = this->contextInfo.directContext();
302
303 context->priv().printCacheStats();
304 context->priv().printGpuStats();
305 context->priv().printContextStats();
306 }
307 };
308
309 #if defined(SK_GRAPHITE)
310 struct GraphiteTarget : public Target {
311     explicit GraphiteTarget(const Config& c) : Target(c) {}
312 using TestContext = skiatest::graphite::GraphiteTestContext;
313 using ContextFactory = skiatest::graphite::ContextFactory;
314
315 std::unique_ptr<ContextFactory> factory;
316
317 TestContext* testContext;
318 skgpu::graphite::Context* context;
319 std::unique_ptr<skgpu::graphite::Recorder> recorder;
320
321     ~GraphiteTarget() override {
322         // For Vulkan we need to release all our refs before we destroy the Vulkan context, which
323         // happens at the end of this destructor. Thus we release the surface here, since it holds
324         // a ref to the Graphite device.
325 surface.reset();
326 }
327
328     void endTiming() override {
329 if (context && recorder) {
330 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
331 if (recording) {
332 this->testContext->submitRecordingAndWaitOnSync(this->context, recording.get());
333 }
334 }
335 }
336     void submitWorkAndSyncCPU() override {
337 if (context && recorder) {
338             // TODO: have a way to sync work without submitting a Recording, which is currently
339             // required. Probably need to get to the point where the backend command buffers are
340             // stored on the Context and not in Recordings before this is feasible.
341 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
342 if (recording) {
343 skgpu::graphite::InsertRecordingInfo info;
344 info.fRecording = recording.get();
345 this->context->insertRecording(info);
346 }
347 this->context->submit(skgpu::graphite::SyncToCpu::kYes);
348 }
349 }
350
351     bool needsFrameTiming(int* maxFrameLag) const override {
352 SkAssertResult(this->testContext->getMaxGpuFrameLag(maxFrameLag));
353 return true;
354 }
355     bool init(SkImageInfo info, Benchmark* bench) override {
356 GrContextOptions options = grContextOpts;
357 bench->modifyGrContextOptions(&options);
358 // TODO: We should merge Ganesh and Graphite context options and then actually use the
359 // context options when we make the factory here.
360 this->factory = std::make_unique<ContextFactory>();
361
362 skiatest::graphite::ContextInfo ctxInfo =
363 this->factory->getContextInfo(this->config.ctxType);
364 if (!ctxInfo.fContext) {
365 return false;
366 }
367 this->testContext = ctxInfo.fTestContext;
368 this->context = ctxInfo.fContext;
369
370 this->recorder = this->context->makeRecorder(ToolUtils::CreateTestingRecorderOptions());
371 if (!this->recorder) {
372 return false;
373 }
374
375 this->surface = SkSurfaces::RenderTarget(this->recorder.get(), info);
376 if (!this->surface) {
377 return false;
378 }
379 // TODO: get fence stuff working
380 #if 0
381 if (!this->contextInfo.testContext()->fenceSyncSupport()) {
382 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
383 "Timings might not be accurate.\n", this->config.name.c_str());
384 }
385 #endif
386 return true;
387 }
388
389     void dumpStats() override {
390 }
391 };
392 #endif // SK_GRAPHITE
393
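// Takes one sample: optionally clears the canvas, draws the bench 'loops' times between a single
// timer start/stop, and returns the elapsed wall-clock time in milliseconds.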
394 static double time(int loops, Benchmark* bench, Target* target) {
395 SkCanvas* canvas = target->getCanvas();
396 if (canvas) {
397 canvas->clear(SK_ColorWHITE);
398 }
399 bench->preDraw(canvas);
400 double start = now_ms();
401 canvas = target->beginTiming(canvas);
402
403 bench->draw(loops, canvas);
404
405 target->endTiming();
406 double elapsed = now_ms() - start;
407 bench->postDraw(canvas);
408 return elapsed;
409 }
410
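// Measures the cost of an empty timer start/stop pair, averaged over FLAGS_overheadLoops runs;
// setup_cpu_bench() uses this to pick a loop count that keeps timer overhead below
// FLAGS_overheadGoal.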
411 static double estimate_timer_overhead() {
412 double overhead = 0;
413 for (int i = 0; i < FLAGS_overheadLoops; i++) {
414 double start = now_ms();
415 overhead += now_ms() - start;
416 }
417 return overhead / FLAGS_overheadLoops;
418 }
419
420 static int detect_forever_loops(int loops) {
421 // look for a magic run-forever value
422 if (loops < 0) {
423 loops = SK_MaxS32;
424 }
425 return loops;
426 }
427
428 static int clamp_loops(int loops) {
429 if (loops < 1) {
430 SkDebugf("ERROR: clamping loops from %d to 1. "
431 "There's probably something wrong with the bench.\n", loops);
432 return 1;
433 }
434 if (loops > FLAGS_maxLoops) {
435 SkDebugf("WARNING: clamping loops from %d to FLAGS_maxLoops, %d.\n", loops, FLAGS_maxLoops);
436 return FLAGS_maxLoops;
437 }
438 return loops;
439 }
440
441 static bool write_canvas_png(Target* target, const SkString& filename) {
442
443 if (filename.isEmpty()) {
444 return false;
445 }
446 if (target->getCanvas() &&
447 kUnknown_SkColorType == target->getCanvas()->imageInfo().colorType()) {
448 return false;
449 }
450
451 SkBitmap bmp;
452
453 if (!target->capturePixels(&bmp)) {
454 return false;
455 }
456
457 SkString dir = SkOSPath::Dirname(filename.c_str());
458 if (!sk_mkdir(dir.c_str())) {
459 SkDebugf("Can't make dir %s.\n", dir.c_str());
460 return false;
461 }
462 SkFILEWStream stream(filename.c_str());
463 if (!stream.isValid()) {
464 SkDebugf("Can't write %s.\n", filename.c_str());
465 return false;
466 }
467 if (!SkPngEncoder::Encode(&stream, bmp.pixmap(), {})) {
468 SkDebugf("Can't encode a PNG.\n");
469 return false;
470 }
471 return true;
472 }
473
474 static int kFailedLoops = -2;
475 static int setup_cpu_bench(const double overhead, Target* target, Benchmark* bench) {
476 // First figure out approximately how many loops of bench it takes to make overhead negligible.
477 double bench_plus_overhead = 0.0;
478 int round = 0;
479 int loops = bench->shouldLoop() ? FLAGS_loops : 1;
480 if (kAutoTuneLoops == loops) {
481 while (bench_plus_overhead < overhead) {
482 if (round++ == FLAGS_maxCalibrationAttempts) {
483 SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
484 bench->getUniqueName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
485 return kFailedLoops;
486 }
487 bench_plus_overhead = time(1, bench, target);
488 }
489 }
490
491 // Later we'll just start and stop the timer once but loop N times.
492 // We'll pick N to make timer overhead negligible:
493 //
494 // overhead
495 // ------------------------- < FLAGS_overheadGoal
496 // overhead + N * Bench Time
497 //
498 // where bench_plus_overhead ~=~ overhead + Bench Time.
499 //
500 // Doing some math, we get:
501 //
502 // (overhead / FLAGS_overheadGoal) - overhead
503 // ------------------------------------------ < N
504     //       bench_plus_overhead - overhead
505 //
506 // Luckily, this also works well in practice. :)
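    // Illustrative numbers only (not from a real run): with overhead = 0.0001ms,
    // FLAGS_overheadGoal = 0.0001, and bench_plus_overhead = 0.0011ms, we get
    // N > (1 - 0.0001) / 0.001, i.e. roughly 1000 loops, which keeps the timer at about
    // 0.01% of each measurement.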
507 if (kAutoTuneLoops == loops) {
508 const double numer = overhead / FLAGS_overheadGoal - overhead;
509 const double denom = bench_plus_overhead - overhead;
510 loops = (int)ceil(numer / denom);
511 loops = clamp_loops(loops);
512 } else {
513 loops = detect_forever_loops(loops);
514 }
515
516 return loops;
517 }
518
519 static int setup_gpu_bench(Target* target, Benchmark* bench, int maxGpuFrameLag) {
520 // First, figure out how many loops it'll take to get a frame up to FLAGS_gpuMs.
521 int loops = bench->shouldLoop() ? FLAGS_loops : 1;
522 if (kAutoTuneLoops == loops) {
523 loops = 1;
524 double elapsed = 0;
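        // Keep doubling the loop count until one timed frame reaches FLAGS_gpuMs; the 1<<30 check
        // below bails out before the count overflows if the bench never gets there.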
525 do {
526 if (1<<30 == loops) {
527 // We're about to wrap. Something's wrong with the bench.
528 loops = 0;
529 break;
530 }
531 loops *= 2;
532 // If the GPU lets frames lag at all, we need to make sure we're timing
533 // _this_ round, not still timing last round.
534 for (int i = 0; i < maxGpuFrameLag; i++) {
535 elapsed = time(loops, bench, target);
536 }
537 } while (elapsed < FLAGS_gpuMs);
538
539 // We've overshot at least a little. Scale back linearly.
540 loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);
541 loops = clamp_loops(loops);
542
543 // Make sure we're not still timing our calibration.
544 target->submitWorkAndSyncCPU();
545 } else {
546 loops = detect_forever_loops(loops);
547 }
548 // Pretty much the same deal as the calibration: do some warmup to make
549 // sure we're timing steady-state pipelined frames.
550 for (int i = 0; i < maxGpuFrameLag; i++) {
551 time(loops, bench, target);
552 }
553
554 return loops;
555 }
556
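// Placeholder context values for configs that don't use a Ganesh GrContext (the CPU configs
// below, and the overrides field of Graphite configs).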
557 #define kBogusContextType skgpu::ContextType::kGL
558 #define kBogusContextOverrides GrContextFactory::ContextOverrides::kNone
559
560 static std::optional<Config> create_config(const SkCommandLineConfig* config) {
561 if (const auto* gpuConfig = config->asConfigGpu()) {
562 if (!FLAGS_gpu) {
563 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
564 return std::nullopt;
565 }
566
567 const auto ctxType = gpuConfig->getContextType();
568 const auto ctxOverrides = gpuConfig->getContextOverrides();
569 const auto sampleCount = gpuConfig->getSamples();
570 const auto colorType = gpuConfig->getColorType();
571 if (gpuConfig->getSurfType() != SkCommandLineConfigGpu::SurfType::kDefault) {
572 SkDebugf("This tool only supports the default surface type.");
573 return std::nullopt;
574 }
575
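        // Spin up a context just to verify that the requested sample count is actually supported
        // before accepting the config.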
576 GrContextFactory factory(grContextOpts);
577 if (const auto ctx = factory.get(ctxType, ctxOverrides)) {
578 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
579 int supportedSampleCount =
580 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
581 if (sampleCount != supportedSampleCount) {
582 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
583 config->getTag().c_str(),
584 sampleCount);
585 return std::nullopt;
586 }
587 } else {
588 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
589 return std::nullopt;
590 }
591
592 return Config{gpuConfig->getTag(),
593 Benchmark::Backend::kGanesh,
594 colorType,
595 kPremul_SkAlphaType,
596 config->refColorSpace(),
597 sampleCount,
598 ctxType,
599 ctxOverrides,
600 gpuConfig->getSurfaceFlags()};
601 }
602 #if defined(SK_GRAPHITE)
603 if (const auto* gpuConfig = config->asConfigGraphite()) {
604 if (!FLAGS_gpu) {
605 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
606 return std::nullopt;
607 }
608
609 const auto graphiteCtxType = gpuConfig->getContextType();
610 const auto sampleCount = 1; // TODO: gpuConfig->getSamples();
611 const auto colorType = gpuConfig->getColorType();
612
613 using ContextFactory = skiatest::graphite::ContextFactory;
614
615 ContextFactory factory(gpuConfig->asConfigGraphite()->getOptions());
616 skiatest::graphite::ContextInfo ctxInfo = factory.getContextInfo(graphiteCtxType);
617 skgpu::graphite::Context* ctx = ctxInfo.fContext;
618 if (ctx) {
619 // TODO: Add graphite ctx queries for supported sample count by color type.
620 #if 0
621 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
622 int supportedSampleCount =
623 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
624 if (sampleCount != supportedSampleCount) {
625 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
626 config->getTag().c_str(),
627 sampleCount);
628 return std::nullopt;
629 }
630 #else
631 if (sampleCount > 1) {
632 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
633 config->getTag().c_str(),
634 sampleCount);
635 return std::nullopt;
636 }
637 #endif
638 } else {
639 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
640 return std::nullopt;
641 }
642
643 return Config{gpuConfig->getTag(),
644 Benchmark::Backend::kGraphite,
645 colorType,
646 kPremul_SkAlphaType,
647 config->refColorSpace(),
648 sampleCount,
649 graphiteCtxType,
650 kBogusContextOverrides,
651 0};
652 }
653 #endif
654
655 #define CPU_CONFIG(name, backend, color, alpha) \
656 if (config->getBackend().equals(name)) { \
657 if (!FLAGS_cpu) { \
658 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str()); \
659 return std::nullopt; \
660 } \
661 return Config{SkString(name), \
662 Benchmark::backend, \
663 color, \
664 alpha, \
665 config->refColorSpace(), \
666 0, \
667 kBogusContextType, \
668 kBogusContextOverrides, \
669 0}; \
670 }
671
672 CPU_CONFIG("nonrendering", Backend::kNonRendering, kUnknown_SkColorType, kUnpremul_SkAlphaType)
673
674 CPU_CONFIG("a8", Backend::kRaster, kAlpha_8_SkColorType, kPremul_SkAlphaType)
675 CPU_CONFIG("565", Backend::kRaster, kRGB_565_SkColorType, kOpaque_SkAlphaType)
676 CPU_CONFIG("8888", Backend::kRaster, kN32_SkColorType, kPremul_SkAlphaType)
677 CPU_CONFIG("rgba", Backend::kRaster, kRGBA_8888_SkColorType, kPremul_SkAlphaType)
678 CPU_CONFIG("bgra", Backend::kRaster, kBGRA_8888_SkColorType, kPremul_SkAlphaType)
679 CPU_CONFIG("f16", Backend::kRaster, kRGBA_F16_SkColorType, kPremul_SkAlphaType)
680 CPU_CONFIG("srgba", Backend::kRaster, kSRGBA_8888_SkColorType, kPremul_SkAlphaType)
681
682 #undef CPU_CONFIG
683
684 SkDebugf("Unknown config '%s'.\n", config->getTag().c_str());
685 return std::nullopt;
686 }
687
688 // Append all configs that are enabled and supported.
689 void create_configs(TArray<Config>* configs) {
690 SkCommandLineConfigArray array;
691 ParseConfigs(FLAGS_config, &array);
692 for (int i = 0; i < array.size(); ++i) {
693 if (std::optional<Config> config = create_config(array[i].get())) {
694 configs->push_back(*config);
695 }
696 }
697
698     // If no configs, or just the default configs, were requested, then we're okay.
699 if (array.size() == 0 || FLAGS_config.size() == 0 ||
700 // Otherwise, make sure that all specified configs have been created.
701 array.size() == configs->size()) {
702 return;
703 }
704 exit(1);
705 }
706
707 // disable warning : switch statement contains default but no 'case' labels
708 #if defined _WIN32
709 #pragma warning ( push )
710 #pragma warning ( disable : 4065 )
711 #endif
712
713 // If bench is enabled for config, returns a Target* for it, otherwise nullptr.
714 static Target* is_enabled(Benchmark* bench, const Config& config) {
715 if (!bench->isSuitableFor(config.backend)) {
716 return nullptr;
717 }
718
719 SkImageInfo info =
720 SkImageInfo::Make(bench->getSize(), config.color, config.alpha, config.colorSpace);
721
722 Target* target = nullptr;
723
724 switch (config.backend) {
725 case Benchmark::Backend::kGanesh:
726 target = new GPUTarget(config);
727 break;
728 #if defined(SK_GRAPHITE)
729 case Benchmark::Backend::kGraphite:
730 target = new GraphiteTarget(config);
731 break;
732 #endif
733 default:
734 target = new Target(config);
735 break;
736 }
737
738 if (!target->init(info, bench)) {
739 delete target;
740 return nullptr;
741 }
742 return target;
743 }
744
745 #if defined _WIN32
746 #pragma warning ( pop )
747 #endif
748
749 #ifdef SK_ENABLE_ANDROID_UTILS
750 static bool valid_brd_bench(sk_sp<SkData> encoded, SkColorType colorType, uint32_t sampleSize,
751 uint32_t minOutputSize, int* width, int* height) {
752 auto brd = android::skia::BitmapRegionDecoder::Make(encoded);
753 if (nullptr == brd) {
754         // This indicates that subset decoding is not supported for a particular image format.
755 return false;
756 }
757
758 if (sampleSize * minOutputSize > (uint32_t) brd->width() || sampleSize * minOutputSize >
759 (uint32_t) brd->height()) {
760 // This indicates that the image is not large enough to decode a
761 // minOutputSize x minOutputSize subset at the given sampleSize.
762 return false;
763 }
764
765 // Set the image width and height. The calling code will use this to choose subsets to decode.
766 *width = brd->width();
767 *height = brd->height();
768 return true;
769 }
770 #endif
771
772 static void cleanup_run(Target* target) {
773 delete target;
774 }
775
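// Gathers benchmark inputs: each entry in 'paths' is either a file with the given extension or a
// directory whose matching entries are collected.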
776 static void collect_files(const CommandLineFlags::StringArray& paths,
777 const char* ext,
778 TArray<SkString>* list) {
779 for (int i = 0; i < paths.size(); ++i) {
780 if (SkStrEndsWith(paths[i], ext)) {
781 list->push_back(SkString(paths[i]));
782 } else {
783 SkOSFile::Iter it(paths[i], ext);
784 SkString path;
785 while (it.next(&path)) {
786 list->push_back(SkOSPath::Join(paths[i], path.c_str()));
787 }
788 }
789 }
790 }
791
792 class BenchmarkStream {
793 public:
794     BenchmarkStream() : fBenches(BenchRegistry::Head())
795 , fGMs(skiagm::GMRegistry::Head()) {
796 collect_files(FLAGS_skps, ".skp", &fSKPs);
797 collect_files(FLAGS_mskps, ".mskp", &fMSKPs);
798 collect_files(FLAGS_svgs, ".svg", &fSVGs);
799 collect_files(FLAGS_texttraces, ".trace", &fTextBlobTraces);
800
801 if (4 != sscanf(FLAGS_clip[0], "%d,%d,%d,%d",
802 &fClip.fLeft, &fClip.fTop, &fClip.fRight, &fClip.fBottom)) {
803 SkDebugf("Can't parse %s from --clip as an SkIRect.\n", FLAGS_clip[0]);
804 exit(1);
805 }
806
807 for (int i = 0; i < FLAGS_scales.size(); i++) {
808 if (1 != sscanf(FLAGS_scales[i], "%f", &fScales.push_back())) {
809 SkDebugf("Can't parse %s from --scales as an SkScalar.\n", FLAGS_scales[i]);
810 exit(1);
811 }
812 }
813
814 if (2 != sscanf(FLAGS_zoom[0], "%f,%lf", &fZoomMax, &fZoomPeriodMs)) {
815 SkDebugf("Can't parse %s from --zoom as a zoomMax,zoomPeriodMs.\n", FLAGS_zoom[0]);
816 exit(1);
817 }
818
819 // Prepare the images for decoding
820 if (!CommonFlags::CollectImages(FLAGS_images, &fImages)) {
821 exit(1);
822 }
823
824 // Choose the candidate color types for image decoding
825 fColorTypes.push_back(kN32_SkColorType);
826 if (!FLAGS_simpleCodec) {
827 fColorTypes.push_back(kRGB_565_SkColorType);
828 fColorTypes.push_back(kAlpha_8_SkColorType);
829 fColorTypes.push_back(kGray_8_SkColorType);
830 }
831 }
832
833     static sk_sp<SkPicture> ReadPicture(const char* path) {
834 // Not strictly necessary, as it will be checked again later,
835 // but helps to avoid a lot of pointless work if we're going to skip it.
836 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
837 return nullptr;
838 }
839
840 std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(path);
841 if (!stream) {
842 SkDebugf("Could not read %s.\n", path);
843 return nullptr;
844 }
845
846 return SkPicture::MakeFromStream(stream.get());
847 }
848
849     static std::unique_ptr<MSKPPlayer> ReadMSKP(const char* path) {
850 // Not strictly necessary, as it will be checked again later,
851 // but helps to avoid a lot of pointless work if we're going to skip it.
852 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
853 return nullptr;
854 }
855
856 std::unique_ptr<SkStreamSeekable> stream = SkStream::MakeFromFile(path);
857 if (!stream) {
858 SkDebugf("Could not read %s.\n", path);
859 return nullptr;
860 }
861
862 return MSKPPlayer::Make(stream.get());
863 }
864
865     static sk_sp<SkPicture> ReadSVGPicture(const char* path) {
866 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
867 return nullptr;
868 }
869 sk_sp<SkData> data(SkData::MakeFromFileName(path));
870 if (!data) {
871 SkDebugf("Could not read %s.\n", path);
872 return nullptr;
873 }
874
875 #if defined(SK_ENABLE_SVG)
876 SkMemoryStream stream(std::move(data));
877 sk_sp<SkSVGDOM> svgDom = SkSVGDOM::Builder()
878 .setFontManager(ToolUtils::TestFontMgr())
879 .setTextShapingFactory(SkShapers::BestAvailable())
880 .make(stream);
881 if (!svgDom) {
882 SkDebugf("Could not parse %s.\n", path);
883 return nullptr;
884 }
885
886 // Use the intrinsic SVG size if available, otherwise fall back to a default value.
887 static const SkSize kDefaultContainerSize = SkSize::Make(128, 128);
888 if (svgDom->containerSize().isEmpty()) {
889 svgDom->setContainerSize(kDefaultContainerSize);
890 }
891
892 SkPictureRecorder recorder;
893 svgDom->render(recorder.beginRecording(svgDom->containerSize().width(),
894 svgDom->containerSize().height()));
895 return recorder.finishRecordingAsPicture();
896 #else
897 return nullptr;
898 #endif // defined(SK_ENABLE_SVG)
899 }
900
901     Benchmark* next() {
902 std::unique_ptr<Benchmark> bench;
903 do {
904 bench.reset(this->rawNext());
905 if (!bench) {
906 return nullptr;
907 }
908 } while (CommandLineFlags::ShouldSkip(FLAGS_sourceType, fSourceType) ||
909 CommandLineFlags::ShouldSkip(FLAGS_benchType, fBenchType));
910 return bench.release();
911 }
912
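    // Produces benches in a fixed order: registered micro benches, GMs, text-blob traces, SKPs
    // (recording, deserialization, playback of SKPs and SVGs at each scale, optional zoom
    // animation), MSKPs, and finally image decode benches (SkCodec, SkAndroidCodec, and BRD when
    // enabled).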
913     Benchmark* rawNext() {
914 if (fBenches) {
915 Benchmark* bench = fBenches->get()(nullptr);
916 fBenches = fBenches->next();
917 fSourceType = "bench";
918 fBenchType = "micro";
919 return bench;
920 }
921
922 while (fGMs) {
923 std::unique_ptr<skiagm::GM> gm = fGMs->get()();
924 if (gm->isBazelOnly()) {
925 // We skip Bazel-only GMs because they might not be regular GMs. The Bazel build
926 // reuses the notion of GMs to replace the notion of DM sources of various kinds,
927 // such as codec sources and image generation sources. See comments in the
928 // skiagm::GM::isBazelOnly function declaration for context.
929 continue;
930 }
931 fGMs = fGMs->next();
932 if (gm->runAsBench()) {
933 fSourceType = "gm";
934 fBenchType = "micro";
935 return new GMBench(std::move(gm));
936 }
937 }
938
939 while (fCurrentTextBlobTrace < fTextBlobTraces.size()) {
940 SkString path = fTextBlobTraces[fCurrentTextBlobTrace++];
941 SkString basename = SkOSPath::Basename(path.c_str());
942 static constexpr char kEnding[] = ".trace";
943 if (basename.endsWith(kEnding)) {
944 basename.remove(basename.size() - strlen(kEnding), strlen(kEnding));
945 }
946 fSourceType = "texttrace";
947 fBenchType = "micro";
948 return CreateDiffCanvasBench(
949 SkStringPrintf("SkDiffBench-%s", basename.c_str()),
950 [path](){ return SkStream::MakeFromFile(path.c_str()); });
951 }
952
953 // First add all .skps as RecordingBenches.
954 while (fCurrentRecording < fSKPs.size()) {
955 const SkString& path = fSKPs[fCurrentRecording++];
956 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
957 if (!pic) {
958 continue;
959 }
960 SkString name = SkOSPath::Basename(path.c_str());
961 fSourceType = "skp";
962 fBenchType = "recording";
963 fSKPBytes = static_cast<double>(pic->approximateBytesUsed());
964 fSKPOps = pic->approximateOpCount();
965 return new RecordingBench(name.c_str(), pic.get(), FLAGS_bbh);
966 }
967
968 // Add all .skps as DeserializePictureBenchs.
969 while (fCurrentDeserialPicture < fSKPs.size()) {
970 const SkString& path = fSKPs[fCurrentDeserialPicture++];
971 sk_sp<SkData> data = SkData::MakeFromFileName(path.c_str());
972 if (!data) {
973 continue;
974 }
975 SkString name = SkOSPath::Basename(path.c_str());
976 fSourceType = "skp";
977 fBenchType = "deserial";
978 fSKPBytes = static_cast<double>(data->size());
979 fSKPOps = 0;
980 return new DeserializePictureBench(name.c_str(), std::move(data));
981 }
982
983 // Then once each for each scale as SKPBenches (playback).
984 while (fCurrentScale < fScales.size()) {
985 while (fCurrentSKP < fSKPs.size()) {
986 const SkString& path = fSKPs[fCurrentSKP++];
987 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
988 if (!pic) {
989 continue;
990 }
991
992 if (FLAGS_bbh) {
993 // The SKP we read off disk doesn't have a BBH. Re-record so it grows one.
994 SkRTreeFactory factory;
995 SkPictureRecorder recorder;
996 pic->playback(recorder.beginRecording(pic->cullRect().width(),
997 pic->cullRect().height(),
998 &factory));
999 pic = recorder.finishRecordingAsPicture();
1000 }
1001 SkString name = SkOSPath::Basename(path.c_str());
1002 fSourceType = "skp";
1003 fBenchType = "playback";
1004 return new SKPBench(name.c_str(), pic.get(), fClip, fScales[fCurrentScale],
1005 FLAGS_loopSKP);
1006 }
1007
1008 while (fCurrentSVG < fSVGs.size()) {
1009 const char* path = fSVGs[fCurrentSVG++].c_str();
1010 if (sk_sp<SkPicture> pic = ReadSVGPicture(path)) {
1011 fSourceType = "svg";
1012 fBenchType = "playback";
1013 return new SKPBench(SkOSPath::Basename(path).c_str(), pic.get(), fClip,
1014 fScales[fCurrentScale], FLAGS_loopSKP);
1015 }
1016 }
1017
1018 fCurrentSKP = 0;
1019 fCurrentSVG = 0;
1020 fCurrentScale++;
1021 }
1022
1023 // Now loop over each skp again if we have an animation
1024 if (fZoomMax != 1.0f && fZoomPeriodMs > 0) {
1025 while (fCurrentAnimSKP < fSKPs.size()) {
1026 const SkString& path = fSKPs[fCurrentAnimSKP];
1027 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
1028 if (!pic) {
1029 fCurrentAnimSKP++;
1030 continue;
1031 }
1032
1033 fCurrentAnimSKP++;
1034 SkString name = SkOSPath::Basename(path.c_str());
1035 sk_sp<SKPAnimationBench::Animation> animation =
1036 SKPAnimationBench::MakeZoomAnimation(fZoomMax, fZoomPeriodMs);
1037 return new SKPAnimationBench(name.c_str(), pic.get(), fClip, std::move(animation),
1038 FLAGS_loopSKP);
1039 }
1040 }
1041
1042 // Read all MSKPs as benches
1043 while (fCurrentMSKP < fMSKPs.size()) {
1044 const SkString& path = fMSKPs[fCurrentMSKP++];
1045 std::unique_ptr<MSKPPlayer> player = ReadMSKP(path.c_str());
1046 if (!player) {
1047 continue;
1048 }
1049 SkString name = SkOSPath::Basename(path.c_str());
1050 fSourceType = "mskp";
1051 fBenchType = "mskp";
1052 return new MSKPBench(std::move(name), std::move(player));
1053 }
1054
1055 for (; fCurrentCodec < fImages.size(); fCurrentCodec++) {
1056 fSourceType = "image";
1057 fBenchType = "skcodec";
1058 const SkString& path = fImages[fCurrentCodec];
1059 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1060 continue;
1061 }
1062 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1063 std::unique_ptr<SkCodec> codec(SkCodec::MakeFromData(encoded));
1064 if (!codec) {
1065 // Nothing to time.
1066 SkDebugf("Cannot find codec for %s\n", path.c_str());
1067 continue;
1068 }
1069
1070 while (fCurrentColorType < fColorTypes.size()) {
1071 const SkColorType colorType = fColorTypes[fCurrentColorType];
1072
1073 SkAlphaType alphaType = codec->getInfo().alphaType();
1074 if (FLAGS_simpleCodec) {
1075 if (kUnpremul_SkAlphaType == alphaType) {
1076 alphaType = kPremul_SkAlphaType;
1077 }
1078
1079 fCurrentColorType++;
1080 } else {
1081 switch (alphaType) {
1082 case kOpaque_SkAlphaType:
1083 // We only need to test one alpha type (opaque).
1084 fCurrentColorType++;
1085 break;
1086 case kUnpremul_SkAlphaType:
1087 case kPremul_SkAlphaType:
1088 if (0 == fCurrentAlphaType) {
1089 // Test unpremul first.
1090 alphaType = kUnpremul_SkAlphaType;
1091 fCurrentAlphaType++;
1092 } else {
1093 // Test premul.
1094 alphaType = kPremul_SkAlphaType;
1095 fCurrentAlphaType = 0;
1096 fCurrentColorType++;
1097 }
1098 break;
1099 default:
1100 SkASSERT(false);
1101 fCurrentColorType++;
1102 break;
1103 }
1104 }
1105
1106 // Make sure we can decode to this color type and alpha type.
1107 SkImageInfo info =
1108 codec->getInfo().makeColorType(colorType).makeAlphaType(alphaType);
1109 const size_t rowBytes = info.minRowBytes();
1110 SkAutoMalloc storage(info.computeByteSize(rowBytes));
1111
1112 const SkCodec::Result result = codec->getPixels(
1113 info, storage.get(), rowBytes);
1114 switch (result) {
1115 case SkCodec::kSuccess:
1116 case SkCodec::kIncompleteInput:
1117 return new CodecBench(SkOSPath::Basename(path.c_str()),
1118 encoded.get(), colorType, alphaType);
1119 case SkCodec::kInvalidConversion:
1120 // This is okay. Not all conversions are valid.
1121 break;
1122 default:
1123 // This represents some sort of failure.
1124 SkASSERT(false);
1125 break;
1126 }
1127 }
1128 fCurrentColorType = 0;
1129 }
1130
1131 // Run AndroidCodecBenches
1132 const int sampleSizes[] = { 2, 4, 8 };
1133 for (; fCurrentAndroidCodec < fImages.size(); fCurrentAndroidCodec++) {
1134 fSourceType = "image";
1135 fBenchType = "skandroidcodec";
1136
1137 const SkString& path = fImages[fCurrentAndroidCodec];
1138 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1139 continue;
1140 }
1141 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1142 std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::MakeFromData(encoded));
1143 if (!codec) {
1144 // Nothing to time.
1145 SkDebugf("Cannot find codec for %s\n", path.c_str());
1146 continue;
1147 }
1148
1149 while (fCurrentSampleSize < (int) std::size(sampleSizes)) {
1150 int sampleSize = sampleSizes[fCurrentSampleSize];
1151 fCurrentSampleSize++;
1152 if (10 * sampleSize > std::min(codec->getInfo().width(), codec->getInfo().height())) {
1153 // Avoid benchmarking scaled decodes of already small images.
1154 break;
1155 }
1156
1157 return new AndroidCodecBench(SkOSPath::Basename(path.c_str()),
1158 encoded.get(), sampleSize);
1159 }
1160 fCurrentSampleSize = 0;
1161 }
1162
1163 #ifdef SK_ENABLE_ANDROID_UTILS
1164 // Run the BRDBenches
1165 // We intend to create benchmarks that model the use cases in
1166 // android/libraries/social/tiledimage. In this library, an image is decoded in 512x512
1167 // tiles. The image can be translated freely, so the location of a tile may be anywhere in
1168 // the image. For that reason, we will benchmark decodes in five representative locations
1169 // in the image. Additionally, this use case utilizes power of two scaling, so we will
1170 // test on power of two sample sizes. The output tile is always 512x512, so, when a
1171 // sampleSize is used, the size of the subset that is decoded is always
1172 // (sampleSize*512)x(sampleSize*512).
1173 // There are a few good reasons to only test on power of two sample sizes at this time:
1174 // All use cases we are aware of only scale by powers of two.
1175 // PNG decodes use the indicated sampling strategy regardless of the sample size, so
1176 // these tests are sufficient to provide good coverage of our scaling options.
1177 const uint32_t brdSampleSizes[] = { 1, 2, 4, 8, 16 };
1178 const uint32_t minOutputSize = 512;
1179 for (; fCurrentBRDImage < fImages.size(); fCurrentBRDImage++) {
1180 fSourceType = "image";
1181 fBenchType = "BRD";
1182
1183 const SkString& path = fImages[fCurrentBRDImage];
1184 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1185 continue;
1186 }
1187
1188 while (fCurrentColorType < fColorTypes.size()) {
1189 while (fCurrentSampleSize < (int) std::size(brdSampleSizes)) {
1190 while (fCurrentSubsetType <= kLastSingle_SubsetType) {
1191
1192 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1193 const SkColorType colorType = fColorTypes[fCurrentColorType];
1194 uint32_t sampleSize = brdSampleSizes[fCurrentSampleSize];
1195 int currentSubsetType = fCurrentSubsetType++;
1196
1197 int width = 0;
1198 int height = 0;
1199 if (!valid_brd_bench(encoded, colorType, sampleSize, minOutputSize,
1200 &width, &height)) {
1201 break;
1202 }
1203
1204 SkString basename = SkOSPath::Basename(path.c_str());
1205 SkIRect subset;
1206 const uint32_t subsetSize = sampleSize * minOutputSize;
1207 switch (currentSubsetType) {
1208 case kTopLeft_SubsetType:
1209 basename.append("_TopLeft");
1210 subset = SkIRect::MakeXYWH(0, 0, subsetSize, subsetSize);
1211 break;
1212 case kTopRight_SubsetType:
1213 basename.append("_TopRight");
1214 subset = SkIRect::MakeXYWH(width - subsetSize, 0, subsetSize,
1215 subsetSize);
1216 break;
1217 case kMiddle_SubsetType:
1218 basename.append("_Middle");
1219 subset = SkIRect::MakeXYWH((width - subsetSize) / 2,
1220 (height - subsetSize) / 2, subsetSize, subsetSize);
1221 break;
1222 case kBottomLeft_SubsetType:
1223 basename.append("_BottomLeft");
1224 subset = SkIRect::MakeXYWH(0, height - subsetSize, subsetSize,
1225 subsetSize);
1226 break;
1227 case kBottomRight_SubsetType:
1228 basename.append("_BottomRight");
1229 subset = SkIRect::MakeXYWH(width - subsetSize,
1230 height - subsetSize, subsetSize, subsetSize);
1231 break;
1232 default:
1233 SkASSERT(false);
1234 }
1235
1236 return new BitmapRegionDecoderBench(basename.c_str(), encoded.get(),
1237 colorType, sampleSize, subset);
1238 }
1239 fCurrentSubsetType = 0;
1240 fCurrentSampleSize++;
1241 }
1242 fCurrentSampleSize = 0;
1243 fCurrentColorType++;
1244 }
1245 fCurrentColorType = 0;
1246 }
1247 #endif // SK_ENABLE_ANDROID_UTILS
1248
1249 return nullptr;
1250 }
1251
1252     void fillCurrentOptions(NanoJSONResultsWriter& log) const {
1253 log.appendCString("source_type", fSourceType);
1254 log.appendCString("bench_type", fBenchType);
1255 if (0 == strcmp(fSourceType, "skp")) {
1256 log.appendString("clip",
1257 SkStringPrintf("%d %d %d %d", fClip.fLeft, fClip.fTop,
1258 fClip.fRight, fClip.fBottom));
1259 SkASSERT_RELEASE(fCurrentScale < fScales.size()); // debugging paranoia
1260 log.appendString("scale", SkStringPrintf("%.2g", fScales[fCurrentScale]));
1261 }
1262 }
1263
1264     void fillCurrentMetrics(NanoJSONResultsWriter& log) const {
1265 if (0 == strcmp(fBenchType, "recording")) {
1266 log.appendMetric("bytes", fSKPBytes);
1267 log.appendMetric("ops", fSKPOps);
1268 }
1269 }
1270
1271 private:
1272 #ifdef SK_ENABLE_ANDROID_UTILS
1273 enum SubsetType {
1274 kTopLeft_SubsetType = 0,
1275 kTopRight_SubsetType = 1,
1276 kMiddle_SubsetType = 2,
1277 kBottomLeft_SubsetType = 3,
1278 kBottomRight_SubsetType = 4,
1279 kTranslate_SubsetType = 5,
1280 kZoom_SubsetType = 6,
1281 kLast_SubsetType = kZoom_SubsetType,
1282 kLastSingle_SubsetType = kBottomRight_SubsetType,
1283 };
1284 #endif
1285
1286 const BenchRegistry* fBenches;
1287 const skiagm::GMRegistry* fGMs;
1288 SkIRect fClip;
1289 TArray<SkScalar> fScales;
1290 TArray<SkString> fSKPs;
1291 TArray<SkString> fMSKPs;
1292 TArray<SkString> fSVGs;
1293 TArray<SkString> fTextBlobTraces;
1294 TArray<SkString> fImages;
1295 TArray<SkColorType, true> fColorTypes;
1296 SkScalar fZoomMax;
1297 double fZoomPeriodMs;
1298
1299 double fSKPBytes, fSKPOps;
1300
1301 const char* fSourceType; // What we're benching: bench, GM, SKP, ...
1302 const char* fBenchType; // How we bench it: micro, recording, playback, ...
1303 int fCurrentRecording = 0;
1304 int fCurrentDeserialPicture = 0;
1305 int fCurrentMSKP = 0;
1306 int fCurrentScale = 0;
1307 int fCurrentSKP = 0;
1308 int fCurrentSVG = 0;
1309 int fCurrentTextBlobTrace = 0;
1310 int fCurrentCodec = 0;
1311 int fCurrentAndroidCodec = 0;
1312 #ifdef SK_ENABLE_ANDROID_UTILS
1313 int fCurrentBRDImage = 0;
1314 int fCurrentSubsetType = 0;
1315 #endif
1316 int fCurrentColorType = 0;
1317 int fCurrentAlphaType = 0;
1318 int fCurrentSampleSize = 0;
1319 int fCurrentAnimSKP = 0;
1320 };
1321
1322 // Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
1323 // This prints something every once in a while so that it knows we're still working.
1324 static void start_keepalive() {
1325 static std::thread* intentionallyLeaked = new std::thread([]{
1326 for (;;) {
1327 static const int kSec = 1200;
1328 #if defined(SK_BUILD_FOR_WIN)
1329 Sleep(kSec * 1000);
1330 #else
1331 sleep(kSec);
1332 #endif
1333 SkDebugf("\nBenchmarks still running...\n");
1334 }
1335 });
1336 (void)intentionallyLeaked;
1337 SK_INTENTIONALLY_LEAKED(intentionallyLeaked);
1338 }
1339
1340 class NanobenchShaderErrorHandler : public GrContextOptions::ShaderErrorHandler {
1341     void compileError(const char* shader, const char* errors) override {
1342 // Nanobench should abort if any shader can't compile. Failure is much better than
1343 // reporting meaningless performance metrics.
1344 std::string message = SkShaderUtils::BuildShaderErrorMessage(shader, errors);
1345 SK_ABORT("\n%s", message.c_str());
1346 }
1347 };
1348
1349 int main(int argc, char** argv) {
1350 CommandLineFlags::Parse(argc, argv);
1351
1352 initializeEventTracingForTools();
1353
1354 #if defined(SK_BUILD_FOR_IOS)
1355 cd_Documents();
1356 #endif
1357 SetupCrashHandler();
1358 if (FLAGS_runtimeCPUDetection) {
1359 SkGraphics::Init();
1360 }
1361
1362     // Our benchmarks currently only decode .png or .jpg files
1363 SkCodecs::Register(SkPngDecoder::Decoder());
1364 SkCodecs::Register(SkJpegDecoder::Decoder());
1365
1366 SkTaskGroup::Enabler enabled(FLAGS_threads);
1367
1368 CommonFlags::SetCtxOptions(&grContextOpts);
1369
1370 NanobenchShaderErrorHandler errorHandler;
1371 grContextOpts.fShaderErrorHandler = &errorHandler;
1372
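    // With a fixed --loops value there is nothing to calibrate, so take a single sample and skip
    // the GPU warm-up frames.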
1373 if (kAutoTuneLoops != FLAGS_loops) {
1374 FLAGS_samples = 1;
1375 FLAGS_gpuFrameLag = 0;
1376 }
1377
1378 if (!FLAGS_writePath.isEmpty()) {
1379 SkDebugf("Writing files to %s.\n", FLAGS_writePath[0]);
1380 if (!sk_mkdir(FLAGS_writePath[0])) {
1381 SkDebugf("Could not create %s. Files won't be written.\n", FLAGS_writePath[0]);
1382 FLAGS_writePath.set(0, nullptr);
1383 }
1384 }
1385
1386 std::unique_ptr<SkWStream> logStream(new SkNullWStream);
1387 if (!FLAGS_outResultsFile.isEmpty()) {
1388 #if defined(SK_RELEASE)
1389 logStream.reset(new SkFILEWStream(FLAGS_outResultsFile[0]));
1390 #else
1391 SkDebugf("I'm ignoring --outResultsFile because this is a Debug build.");
1392 return 1;
1393 #endif
1394 }
1395 NanoJSONResultsWriter log(logStream.get(), SkJSONWriter::Mode::kPretty);
1396 log.beginObject(); // root
1397
1398 if (1 == FLAGS_properties.size() % 2) {
1399 SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
1400 return 1;
1401 }
1402 for (int i = 1; i < FLAGS_properties.size(); i += 2) {
1403 log.appendCString(FLAGS_properties[i-1], FLAGS_properties[i]);
1404 }
1405
1406 if (1 == FLAGS_key.size() % 2) {
1407 SkDebugf("ERROR: --key must be passed with an even number of arguments.\n");
1408 return 1;
1409 }
1410 if (FLAGS_key.size()) {
1411 log.beginObject("key");
1412 for (int i = 1; i < FLAGS_key.size(); i += 2) {
1413 log.appendCString(FLAGS_key[i - 1], FLAGS_key[i]);
1414 }
1415 log.endObject(); // key
1416 }
1417
1418 const double overhead = estimate_timer_overhead();
1419 if (!FLAGS_quiet && !FLAGS_csv) {
1420 SkDebugf("Timer overhead: %s\n", HUMANIZE(overhead));
1421 }
1422
1423 TArray<double> samples;
1424
1425 if (kAutoTuneLoops != FLAGS_loops) {
1426 SkDebugf("Fixed number of loops; times would only be misleading so we won't print them.\n");
1427 } else if (FLAGS_quiet) {
1428 SkDebugf("! -> high variance, ? -> moderate variance\n");
1429 SkDebugf(" micros \tbench\n");
1430 } else if (FLAGS_csv) {
1431 SkDebugf("min,median,mean,max,stddev,config,bench\n");
1432 } else if (FLAGS_ms) {
1433 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\tsamples\tconfig\tbench\n");
1434 } else {
1435 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\t%-*s\tconfig\tbench\n",
1436 FLAGS_samples, "samples");
1437 }
1438
1439 GrRecordingContextPriv::DMSAAStats combinedDMSAAStats;
1440
1441 TArray<Config> configs;
1442 create_configs(&configs);
1443
1444 if (FLAGS_keepAlive) {
1445 start_keepalive();
1446 }
1447
1448 gSkForceRasterPipelineBlitter = FLAGS_forceRasterPipelineHP || FLAGS_forceRasterPipeline;
1449 gForceHighPrecisionRasterPipeline = FLAGS_forceRasterPipelineHP;
1450
1451 // The SkSL memory benchmark must run before any GPU painting occurs. SkSL allocates memory for
1452 // its modules the first time they are accessed, and this test is trying to measure the size of
1453 // those allocations. If a paint has already occurred, some modules will have already been
1454 // loaded, so we won't be able to capture a delta for them.
1455 log.beginObject("results");
1456 RunSkSLModuleBenchmarks(&log);
1457
1458 int runs = 0;
1459 BenchmarkStream benchStream;
1460 AutoreleasePool pool;
1461 while (Benchmark* b = benchStream.next()) {
1462 std::unique_ptr<Benchmark> bench(b);
1463 if (CommandLineFlags::ShouldSkip(FLAGS_match, bench->getUniqueName())) {
1464 continue;
1465 }
1466
1467 if (!configs.empty()) {
1468 log.beginBench(
1469 bench->getUniqueName(), bench->getSize().width(), bench->getSize().height());
1470 bench->delayedSetup();
1471 }
1472 for (int i = 0; i < configs.size(); ++i) {
1473 Target* target = is_enabled(b, configs[i]);
1474 if (!target) {
1475 continue;
1476 }
1477
1478 // During HWUI output this canvas may be nullptr.
1479 SkCanvas* canvas = target->getCanvas();
1480 const char* config = target->config.name.c_str();
1481
1482 if (FLAGS_pre_log || FLAGS_dryRun) {
1483 SkDebugf("Running %s\t%s\n"
1484 , bench->getUniqueName()
1485 , config);
1486 if (FLAGS_dryRun) {
1487 continue;
1488 }
1489 }
1490
1491 if (FLAGS_purgeBetweenBenches) {
1492 SkGraphics::PurgeAllCaches();
1493 }
1494
1495 if (FLAGS_splitPerfettoTracesByBenchmark) {
1496 TRACE_EVENT_API_NEW_TRACE_SECTION(TRACE_STR_COPY(bench->getUniqueName()));
1497 }
1498 TRACE_EVENT2("skia", "Benchmark", "name", TRACE_STR_COPY(bench->getUniqueName()),
1499 "config", TRACE_STR_COPY(config));
1500
1501 target->setup();
1502 bench->perCanvasPreDraw(canvas);
1503
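            // GPU-backed targets calibrate loop counts against a wall-clock frame-time target
            // (setup_gpu_bench); CPU targets calibrate against timer overhead (setup_cpu_bench).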
1504 int maxFrameLag;
1505 int loops = target->needsFrameTiming(&maxFrameLag)
1506 ? setup_gpu_bench(target, bench.get(), maxFrameLag)
1507 : setup_cpu_bench(overhead, target, bench.get());
1508
1509 if (kFailedLoops == loops) {
1510 // Can't be timed. A warning note has already been printed.
1511 cleanup_run(target);
1512 continue;
1513 }
1514
1515 if (runs == 0 && FLAGS_ms < 1000) {
1516 // Run the first bench for 1000ms to warm up the nanobench if FLAGS_ms < 1000.
1517 // Otherwise, the first few benches' measurements will be inaccurate.
1518 auto stop = now_ms() + 1000;
1519 do {
1520 time(loops, bench.get(), target);
1521 pool.drain();
1522 } while (now_ms() < stop);
1523 }
1524
1525 if (FLAGS_ms) {
1526 samples.clear();
1527 auto stop = now_ms() + FLAGS_ms;
1528 do {
1529 samples.push_back(time(loops, bench.get(), target) / loops);
1530 pool.drain();
1531 } while (now_ms() < stop);
1532 } else {
1533 samples.reset(FLAGS_samples);
1534 for (int s = 0; s < FLAGS_samples; s++) {
1535 samples[s] = time(loops, bench.get(), target) / loops;
1536 pool.drain();
1537 }
1538 }
1539
1540 // Scale each result to the benchmark's own units, time/unit.
1541 for (double& sample : samples) {
1542 sample *= (1.0 / bench->getUnits());
1543 }
1544
1545 TArray<SkString> keys;
1546 TArray<double> values;
1547 if (configs[i].backend == Benchmark::Backend::kGanesh) {
1548 if (FLAGS_gpuStatsDump) {
1549 // TODO cache stats
1550 bench->getGpuStats(canvas, &keys, &values);
1551 }
1552 if (FLAGS_dmsaaStatsDump && bench->getDMSAAStats(canvas->recordingContext())) {
1553 const auto& dmsaaStats = canvas->recordingContext()->priv().dmsaaStats();
1554 dmsaaStats.dumpKeyValuePairs(&keys, &values);
1555 dmsaaStats.dump();
1556 combinedDMSAAStats.merge(dmsaaStats);
1557 }
1558 }
1559
1560 bench->perCanvasPostDraw(canvas);
1561
1562 if (Benchmark::Backend::kNonRendering != target->config.backend &&
1563 !FLAGS_writePath.isEmpty() && FLAGS_writePath[0]) {
1564 SkString pngFilename = SkOSPath::Join(FLAGS_writePath[0], config);
1565 pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getUniqueName());
1566 pngFilename.append(".png");
1567 write_canvas_png(target, pngFilename);
1568 }
1569
1570 // Building stats.plot often shows up in profiles,
1571 // so skip building it when we're not going to print it anyway.
1572 const bool want_plot = !FLAGS_quiet && !FLAGS_ms;
1573
1574 Stats stats(samples, want_plot);
1575 log.beginObject(config);
1576
1577 log.beginObject("options");
1578 log.appendCString("name", bench->getName());
1579 benchStream.fillCurrentOptions(log);
1580 log.endObject(); // options
1581
1582 // Metrics
1583 log.appendMetric("min_ms", stats.min);
1584 log.appendMetric("min_ratio", sk_ieee_double_divide(stats.median, stats.min));
1585 log.beginArray("samples");
1586 for (double sample : samples) {
1587 log.appendDoubleDigits(sample, 16);
1588 }
1589 log.endArray(); // samples
1590 benchStream.fillCurrentMetrics(log);
1591 if (!keys.empty()) {
1592 // dump to json, only SKPBench currently returns valid keys / values
1593 SkASSERT(keys.size() == values.size());
1594 for (int j = 0; j < keys.size(); j++) {
1595 log.appendMetric(keys[j].c_str(), values[j]);
1596 }
1597 }
1598
1599 log.endObject(); // config
1600
1601 if (runs++ % FLAGS_flushEvery == 0) {
1602 log.flush();
1603 }
1604
1605 if (kAutoTuneLoops != FLAGS_loops) {
1606 if (configs.size() == 1) {
1607 config = ""; // Only print the config if we run the same bench on more than one.
1608 }
1609 SkDebugf("%4d/%-4dMB\t%s\t%s "
1610 , sk_tools::getCurrResidentSetSizeMB()
1611 , sk_tools::getMaxResidentSetSizeMB()
1612 , bench->getUniqueName()
1613 , config);
1614 SkDebugf("\n");
1615 } else if (FLAGS_quiet) {
1616 const char* mark = " ";
1617 const double stddev_percent =
1618 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1619 if (stddev_percent > 5) mark = "?";
1620 if (stddev_percent > 10) mark = "!";
1621
1622 SkDebugf("%10.2f %s\t%s\t%s\n",
1623 stats.median*1e3, mark, bench->getUniqueName(), config);
1624 } else if (FLAGS_csv) {
1625 const double stddev_percent =
1626 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1627 SkDebugf("%g,%g,%g,%g,%g,%s,%s\n"
1628 , stats.min
1629 , stats.median
1630 , stats.mean
1631 , stats.max
1632 , stddev_percent
1633 , config
1634 , bench->getUniqueName()
1635 );
1636 } else {
1637 const double stddev_percent =
1638 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1639 SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n"
1640 , sk_tools::getCurrResidentSetSizeMB()
1641 , sk_tools::getMaxResidentSetSizeMB()
1642 , loops
1643 , HUMANIZE(stats.min)
1644 , HUMANIZE(stats.median)
1645 , HUMANIZE(stats.mean)
1646 , HUMANIZE(stats.max)
1647 , stddev_percent
1648 , FLAGS_ms ? to_string(samples.size()).c_str() : stats.plot.c_str()
1649 , config
1650 , bench->getUniqueName()
1651 );
1652 }
1653
1654 if (FLAGS_gpuStats && Benchmark::Backend::kGanesh == configs[i].backend) {
1655 target->dumpStats();
1656 }
1657
1658 if (FLAGS_verbose) {
1659 SkDebugf("Samples: ");
1660 for (int j = 0; j < samples.size(); j++) {
1661 SkDebugf("%s ", HUMANIZE(samples[j]));
1662 }
1663 SkDebugf("%s\n", bench->getUniqueName());
1664 }
1665 cleanup_run(target);
1666 pool.drain();
1667 }
1668 if (!configs.empty()) {
1669 log.endBench();
1670 }
1671 }
1672
1673 if (FLAGS_dmsaaStatsDump) {
1674 SkDebugf("<<Total Combined DMSAA Stats>>\n");
1675 combinedDMSAAStats.dump();
1676 }
1677
1678 SkGraphics::PurgeAllCaches();
1679
1680 log.beginBench("memory_usage", 0, 0);
1681     log.beginObject("meta"); // meta
1682     log.appendS32("max_rss_mb", sk_tools::getMaxResidentSetSizeMB());
1683     log.endObject(); // meta
1684 log.endBench();
1685
1686 log.endObject(); // results
1687 log.endObject(); // root
1688 log.flush();
1689
1690 return 0;
1691 }
1692