1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <ctype.h>
9
10 #include "bench/nanobench.h"
11
12 #include "bench/AndroidCodecBench.h"
13 #include "bench/Benchmark.h"
14 #include "bench/CodecBench.h"
15 #include "bench/CodecBenchPriv.h"
16 #include "bench/GMBench.h"
17 #include "bench/MSKPBench.h"
18 #include "bench/RecordingBench.h"
19 #include "bench/ResultsWriter.h"
20 #include "bench/SKPAnimationBench.h"
21 #include "bench/SKPBench.h"
22 #include "bench/SkGlyphCacheBench.h"
23 #include "bench/SkSLBench.h"
24 #include "include/codec/SkAndroidCodec.h"
25 #include "include/codec/SkCodec.h"
26 #include "include/codec/SkJpegDecoder.h"
27 #include "include/codec/SkPngDecoder.h"
28 #include "include/core/SkBBHFactory.h"
29 #include "include/core/SkCanvas.h"
30 #include "include/core/SkData.h"
31 #include "include/core/SkGraphics.h"
32 #include "include/core/SkPictureRecorder.h"
33 #include "include/core/SkString.h"
34 #include "include/core/SkSurface.h"
35 #include "include/encode/SkPngEncoder.h"
36 #include "include/private/base/SkMacros.h"
37 #include "src/base/SkAutoMalloc.h"
38 #include "src/base/SkLeanWindows.h"
39 #include "src/base/SkTime.h"
40 #include "src/core/SkColorSpacePriv.h"
41 #include "src/core/SkOSFile.h"
42 #include "src/core/SkTaskGroup.h"
43 #include "src/core/SkTraceEvent.h"
44 #include "src/utils/SkJSONWriter.h"
45 #include "src/utils/SkOSPath.h"
46 #include "src/utils/SkShaderUtils.h"
47 #include "tools/AutoreleasePool.h"
48 #include "tools/CrashHandler.h"
49 #include "tools/MSKPPlayer.h"
50 #include "tools/ProcStats.h"
51 #include "tools/Stats.h"
52 #include "tools/ToolUtils.h"
53 #include "tools/flags/CommonFlags.h"
54 #include "tools/flags/CommonFlagsConfig.h"
55 #include "tools/flags/CommonFlagsGanesh.h"
56 #include "tools/fonts/FontToolUtils.h"
57 #include "tools/ios_utils.h"
58 #include "tools/trace/EventTracingPriv.h"
59 #include "tools/trace/SkDebugfTracer.h"
60
61 #if defined(SK_ENABLE_SVG)
62 #include "modules/skshaper/utils/FactoryHelpers.h"
63 #include "modules/svg/include/SkSVGDOM.h"
64 #include "modules/svg/include/SkSVGNode.h"
65 #endif
66
67 #ifdef SK_ENABLE_ANDROID_UTILS
68 #include "bench/BitmapRegionDecoderBench.h"
69 #include "client_utils/android/BitmapRegionDecoder.h"
70 #endif
71
72 #if defined(SK_GRAPHITE)
73 #include "include/gpu/graphite/Context.h"
74 #include "include/gpu/graphite/Recorder.h"
75 #include "include/gpu/graphite/Recording.h"
76 #include "include/gpu/graphite/Surface.h"
77 #include "tools/flags/CommonFlagsGraphite.h"
78 #include "tools/graphite/ContextFactory.h"
79 #include "tools/graphite/GraphiteTestContext.h"
80 #include "tools/graphite/GraphiteToolUtils.h"
81 #endif
82
83 #include <cinttypes>
84 #include <memory>
85 #include <optional>
86 #include <stdlib.h>
87 #include <thread>
88
89 extern bool gSkForceRasterPipelineBlitter;
90 extern bool gForceHighPrecisionRasterPipeline;
91
92 #ifndef SK_BUILD_FOR_WIN
93 #include <unistd.h>
94 #endif
95
96 #include "include/gpu/ganesh/GrDirectContext.h"
97 #include "include/gpu/ganesh/SkSurfaceGanesh.h"
98 #include "src/gpu/ganesh/GrCaps.h"
99 #include "src/gpu/ganesh/GrDirectContextPriv.h"
100 #include "src/gpu/ganesh/SkGr.h"
101 #include "tools/gpu/GrContextFactory.h"
102
103 using namespace skia_private;
104
105 using sk_gpu_test::ContextInfo;
106 using sk_gpu_test::GrContextFactory;
107 using sk_gpu_test::TestContext;
108
109 GrContextOptions grContextOpts;
110
111 #if defined(SK_GRAPHITE)
112 skiatest::graphite::TestOptions gTestOptions;
113 #endif
114
115 static const int kAutoTuneLoops = 0;
116
117 static SkString loops_help_txt() {
118 SkString help;
119 help.printf("Number of times to run each bench. Set this to %d to auto-"
120 "tune for each bench. Timings are only reported when auto-tuning.",
121 kAutoTuneLoops);
122 return help;
123 }
124
125 static SkString to_string(int n) {
126 SkString str;
127 str.appendS32(n);
128 return str;
129 }
130
131 static DEFINE_int(loops, kAutoTuneLoops, loops_help_txt().c_str());
132
133 static DEFINE_int(samples, 10, "Number of samples to measure for each bench.");
134 static DEFINE_int(ms, 0, "If >0, run each bench for this many ms instead of obeying --samples.");
135 static DEFINE_int(overheadLoops, 100000, "Loops to estimate timer overhead.");
136 static DEFINE_double(overheadGoal, 0.0001,
137 "Loop until timer overhead is at most this fraction of our measurments.");
138 static DEFINE_double(gpuMs, 5, "Target bench time in milliseconds for GPU.");
139 static DEFINE_int(gpuFrameLag, 5,
140 "If unknown, estimated maximum number of frames GPU allows to lag.");
141
142 static DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
143 static DEFINE_int(maxCalibrationAttempts, 3,
144 "Try up to this many times to guess loops for a bench, or skip the bench.");
145 static DEFINE_int(maxLoops, 1000000, "Never run a bench more times than this.");
146 static DEFINE_string(clip, "0,0,1000,1000", "Clip for SKPs.");
147 static DEFINE_string(scales, "1.0", "Space-separated scales for SKPs.");
148 static DEFINE_string(zoom, "1.0,0",
149 "Comma-separated zoomMax,zoomPeriodMs factors for a periodic SKP zoom "
150 "function that ping-pongs between 1.0 and zoomMax.");
151 static DEFINE_bool(bbh, true, "Build a BBH for SKPs?");
152 static DEFINE_bool(loopSKP, true, "Loop SKPs like we do for micro benches?");
153 static DEFINE_int(flushEvery, 10, "Flush --outResultsFile every Nth run.");
154 static DEFINE_bool(gpuStats, false, "Print GPU stats after each gpu benchmark?");
155 static DEFINE_bool(gpuStatsDump, false, "Dump GPU stats after each benchmark to json");
156 static DEFINE_bool(dmsaaStatsDump, false, "Dump DMSAA stats after each benchmark to json");
157 static DEFINE_bool(keepAlive, false, "Print a message every so often so that we don't time out");
158 static DEFINE_bool(csv, false, "Print status in CSV format");
159 static DEFINE_string(sourceType, "",
160 "Apply usual --match rules to source type: bench, gm, skp, image, etc.");
161 static DEFINE_string(benchType, "",
162 "Apply usual --match rules to bench type: micro, recording, "
163 "piping, playback, skcodec, etc.");
164
165 static DEFINE_bool(forceRasterPipeline, false, "sets gSkForceRasterPipelineBlitter");
166 static DEFINE_bool(forceRasterPipelineHP, false, "sets gSkForceRasterPipelineBlitter and gForceHighPrecisionRasterPipeline");
167
168 static DEFINE_bool2(pre_log, p, false,
169 "Log before running each test. May be incomprehensible when threading");
170
171 static DEFINE_bool(cpu, true, "Run CPU-bound work?");
172 static DEFINE_bool(gpu, true, "Run GPU-bound work?");
173 static DEFINE_bool(dryRun, false,
174 "just print the tests that would be run, without actually running them.");
175 static DEFINE_string(images, "",
176 "List of images and/or directories to decode. A directory with no images"
177 " is treated as a fatal error.");
178 static DEFINE_bool(simpleCodec, false,
179 "Runs of a subset of the codec tests, always N32, Premul or Opaque");
180
181 static DEFINE_string2(match, m, nullptr,
182 "[~][^]substring[$] [...] of name to run.\n"
183 "Multiple matches may be separated by spaces.\n"
184 "~ causes a matching name to always be skipped\n"
185 "^ requires the start of the name to match\n"
186 "$ requires the end of the name to match\n"
187 "^ and $ requires an exact match\n"
188 "If a name does not match any list entry,\n"
189 "it is skipped unless some list entry starts with ~");
190
191 static DEFINE_bool2(quiet, q, false, "if true, don't print status updates.");
192 static DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver.");
193
194
195 static DEFINE_string(skps, "skps", "Directory to read skps from.");
196 static DEFINE_string(mskps, "mskps", "Directory to read mskps from.");
197 static DEFINE_string(svgs, "", "Directory to read SVGs from, or a single SVG file.");
198 static DEFINE_string(texttraces, "", "Directory to read TextBlobTrace files from.");
199
200 static DEFINE_int_2(threads, j, -1,
201 "Run threadsafe tests on a threadpool with this many extra threads, "
202 "defaulting to one extra thread per core.");
203
204 static DEFINE_string2(writePath, w, "", "If set, write bitmaps here as .pngs.");
205
206 static DEFINE_string(key, "",
207 "Space-separated key/value pairs to add to JSON identifying this builder.");
208 static DEFINE_string(properties, "",
209 "Space-separated key/value pairs to add to JSON identifying this run.");
210
211 static DEFINE_bool(purgeBetweenBenches, false,
212 "Call SkGraphics::PurgeAllCaches() between each benchmark?");
213
214 static DEFINE_bool(splitPerfettoTracesByBenchmark, true,
215 "Create separate perfetto trace files for each benchmark?\n"
216 "Will only take effect if perfetto tracing is enabled. See --trace.");
217
218 static DEFINE_bool(runtimeCPUDetection, true, "If false, skip runtime CPU detection and optimization");
219
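// SkTime::GetNSecs() reports nanoseconds; scale by 1e-6 to get milliseconds.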
220 static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
221
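// With --verbose, print raw nanoseconds; otherwise use the human-readable formatter.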
222 static SkString humanize(double ms) {
223 if (FLAGS_verbose) return SkStringPrintf("%" PRIu64, (uint64_t)(ms*1e6));
224 return HumanizeMs(ms);
225 }
226 #define HUMANIZE(ms) humanize(ms).c_str()
227
228 bool Target::init(SkImageInfo info, Benchmark* bench) {
229 if (Benchmark::Backend::kRaster == config.backend) {
230 this->surface = SkSurfaces::Raster(info);
231 if (!this->surface) {
232 return false;
233 }
234 }
235 return true;
236 }
237 bool Target::capturePixels(SkBitmap* bmp) {
238 SkCanvas* canvas = this->getCanvas();
239 if (!canvas) {
240 return false;
241 }
242 bmp->allocPixels(canvas->imageInfo());
243 if (!canvas->readPixels(*bmp, 0, 0)) {
244 SkDebugf("Can't read canvas pixels.\n");
245 return false;
246 }
247 return true;
248 }
249
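// Render target backed by a Ganesh GrDirectContext; owns the GrContextFactory that creates it.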
250 struct GPUTarget : public Target {
251 explicit GPUTarget(const Config& c) : Target(c) {}
252 ContextInfo contextInfo;
253 std::unique_ptr<GrContextFactory> factory;
254
255 ~GPUTarget() override {
256 // For Vulkan we need to release all our refs to the GrContext before destroying the Vulkan
257 // context, which happens at the end of this destructor. Thus we need to release the surface
258 // here, since it holds a ref to the GrContext.
259 surface.reset();
260 }
261
262 void onSetup() override {
263 this->contextInfo.testContext()->makeCurrent();
264 }
265 void submitFrame() override {
266 if (this->contextInfo.testContext()) {
267 this->contextInfo.testContext()->flushAndWaitOnSync(contextInfo.directContext());
268 }
269 }
270 void submitWorkAndSyncCPU() override {
271 if (this->contextInfo.testContext()) {
272 this->contextInfo.testContext()->flushAndSyncCpu(contextInfo.directContext());
273 }
274 }
275
276 bool needsFrameTiming(int* maxFrameLag) const override {
277 if (!this->contextInfo.testContext()->getMaxGpuFrameLag(maxFrameLag)) {
278 // Frame lag is unknown.
279 *maxFrameLag = FLAGS_gpuFrameLag;
280 }
281 return true;
282 }
283 bool init(SkImageInfo info, Benchmark* bench) override {
284 GrContextOptions options = grContextOpts;
285 bench->modifyGrContextOptions(&options);
286 this->factory = std::make_unique<GrContextFactory>(options);
287 SkSurfaceProps props(this->config.surfaceFlags, kRGB_H_SkPixelGeometry);
288 this->surface = SkSurfaces::RenderTarget(
289 this->factory->get(this->config.ctxType, this->config.ctxOverrides),
290 skgpu::Budgeted::kNo,
291 info,
292 this->config.samples,
293 &props);
294 this->contextInfo =
295 this->factory->getContextInfo(this->config.ctxType, this->config.ctxOverrides);
296 if (!this->surface) {
297 return false;
298 }
299 if (!this->contextInfo.testContext()->fenceSyncSupport()) {
300 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
301 "Timings might not be accurate.\n", this->config.name.c_str());
302 }
303 return true;
304 }
305
306 void dumpStats() override {
307 auto context = this->contextInfo.directContext();
308
309 context->priv().printCacheStats();
310 context->priv().printGpuStats();
311 context->priv().printContextStats();
312 }
313 };
314
315 #if defined(SK_GRAPHITE)
316 struct GraphiteTarget : public Target {
317 explicit GraphiteTarget(const Config& c) : Target(c) {}
318 using TestContext = skiatest::graphite::GraphiteTestContext;
319 using ContextFactory = skiatest::graphite::ContextFactory;
320
321 std::unique_ptr<ContextFactory> factory;
322
323 TestContext* testContext;
324 skgpu::graphite::Context* context;
325 std::unique_ptr<skgpu::graphite::Recorder> recorder;
326
327 ~GraphiteTarget() override {
328 // For Vulkan we need to release all our refs before we destroy the Vulkan context, which
329 // happens at the end of this destructor. Thus we need to release the surface here, since it
330 // holds a ref to the Graphite device.
331 surface.reset();
332 }
333 void submitFrame() override {
334 if (context && recorder) {
335 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
336 if (recording) {
337 this->testContext->submitRecordingAndWaitOnSync(this->context, recording.get());
338 }
339 }
340 }
341 void submitWorkAndSyncCPU() override {
342 if (context && recorder) {
343 // TODO: have a way to sync work without submitting a Recording, which is currently
344 // required. Probably need to get to the point where the backend command buffers are
345 // stored on the Context and not Recordings before this is feasible.
346 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
347 if (recording) {
348 skgpu::graphite::InsertRecordingInfo info;
349 info.fRecording = recording.get();
350 this->context->insertRecording(info);
351 }
352 this->context->submit(skgpu::graphite::SyncToCpu::kYes);
353 }
354 }
355
356 bool needsFrameTiming(int* maxFrameLag) const override {
357 SkAssertResult(this->testContext->getMaxGpuFrameLag(maxFrameLag));
358 return true;
359 }
360 bool init(SkImageInfo info, Benchmark* bench) override {
361 skiatest::graphite::TestOptions testOptions = gTestOptions;
362 testOptions.fContextOptions.fRequireOrderedRecordings = true;
363 bench->modifyGraphiteContextOptions(&testOptions.fContextOptions);
364
365 this->factory = std::make_unique<ContextFactory>(testOptions);
366
367 skiatest::graphite::ContextInfo ctxInfo =
368 this->factory->getContextInfo(this->config.ctxType);
369 if (!ctxInfo.fContext) {
370 return false;
371 }
372 this->testContext = ctxInfo.fTestContext;
373 this->context = ctxInfo.fContext;
374
375 this->recorder = this->context->makeRecorder(ToolUtils::CreateTestingRecorderOptions());
376 if (!this->recorder) {
377 return false;
378 }
379
380 this->surface = SkSurfaces::RenderTarget(this->recorder.get(), info);
381 if (!this->surface) {
382 return false;
383 }
384 // TODO: get fence stuff working
385 #if 0
386 if (!this->contextInfo.testContext()->fenceSyncSupport()) {
387 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
388 "Timings might not be accurate.\n", this->config.name.c_str());
389 }
390 #endif
391 return true;
392 }
393
394 void dumpStats() override {
395 }
396 };
397 #endif // SK_GRAPHITE
398
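// Draw the bench 'loops' times on the target and return the elapsed wall-clock time in ms.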
399 static double time(int loops, Benchmark* bench, Target* target) {
400 SkCanvas* canvas = target->getCanvas();
401 if (canvas) {
402 canvas->clear(SK_ColorWHITE);
403 }
404 bench->preDraw(canvas);
405 double start = now_ms();
406 canvas = target->beginTiming(canvas);
407
408 auto submitFrame = [target]() { target->submitFrame(); };
409
410 bench->draw(loops, canvas, submitFrame);
411
412 double elapsed = now_ms() - start;
413 bench->postDraw(canvas);
414 return elapsed;
415 }
416
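// Estimate the cost of the timer itself: each iteration starts and immediately stops the
// clock, so the average difference is pure measurement overhead.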
417 static double estimate_timer_overhead() {
418 double overhead = 0;
419 for (int i = 0; i < FLAGS_overheadLoops; i++) {
420 double start = now_ms();
421 overhead += now_ms() - start;
422 }
423 return overhead / FLAGS_overheadLoops;
424 }
425
426 static int detect_forever_loops(int loops) {
427 // look for a magic run-forever value
428 if (loops < 0) {
429 loops = SK_MaxS32;
430 }
431 return loops;
432 }
433
434 static int clamp_loops(int loops) {
435 if (loops < 1) {
436 SkDebugf("ERROR: clamping loops from %d to 1. "
437 "There's probably something wrong with the bench.\n", loops);
438 return 1;
439 }
440 if (loops > FLAGS_maxLoops) {
441 SkDebugf("WARNING: clamping loops from %d to FLAGS_maxLoops, %d.\n", loops, FLAGS_maxLoops);
442 return FLAGS_maxLoops;
443 }
444 return loops;
445 }
446
447 static bool write_canvas_png(Target* target, const SkString& filename) {
448
449 if (filename.isEmpty()) {
450 return false;
451 }
452 if (target->getCanvas() &&
453 kUnknown_SkColorType == target->getCanvas()->imageInfo().colorType()) {
454 return false;
455 }
456
457 SkBitmap bmp;
458
459 if (!target->capturePixels(&bmp)) {
460 return false;
461 }
462
463 SkString dir = SkOSPath::Dirname(filename.c_str());
464 if (!sk_mkdir(dir.c_str())) {
465 SkDebugf("Can't make dir %s.\n", dir.c_str());
466 return false;
467 }
468 SkFILEWStream stream(filename.c_str());
469 if (!stream.isValid()) {
470 SkDebugf("Can't write %s.\n", filename.c_str());
471 return false;
472 }
473 if (!SkPngEncoder::Encode(&stream, bmp.pixmap(), {})) {
474 SkDebugf("Can't encode a PNG.\n");
475 return false;
476 }
477 return true;
478 }
479
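// Sentinel returned by setup_cpu_bench() when loop calibration fails; such benches are skipped.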
480 static int kFailedLoops = -2;
481 static int setup_cpu_bench(const double overhead, Target* target, Benchmark* bench) {
482 // First figure out approximately how many loops of bench it takes to make overhead negligible.
483 double bench_plus_overhead = 0.0;
484 int round = 0;
485 int loops = bench->shouldLoop() ? FLAGS_loops : 1;
486 if (kAutoTuneLoops == loops) {
487 while (bench_plus_overhead < overhead) {
488 if (round++ == FLAGS_maxCalibrationAttempts) {
489 SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
490 bench->getUniqueName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
491 return kFailedLoops;
492 }
493 bench_plus_overhead = time(1, bench, target);
494 }
495 }
496
497 // Later we'll just start and stop the timer once but loop N times.
498 // We'll pick N to make timer overhead negligible:
499 //
500 // overhead
501 // ------------------------- < FLAGS_overheadGoal
502 // overhead + N * Bench Time
503 //
504 // where bench_plus_overhead ~=~ overhead + Bench Time.
505 //
506 // Doing some math, we get:
507 //
508 // (overhead / FLAGS_overheadGoal) - overhead
509 // ------------------------------------------ < N
510 // bench_plus_overhead - overhead
511 //
512 // Luckily, this also works well in practice. :)
513 if (kAutoTuneLoops == loops) {
514 const double numer = overhead / FLAGS_overheadGoal - overhead;
515 const double denom = bench_plus_overhead - overhead;
516 loops = (int)ceil(numer / denom);
517 loops = clamp_loops(loops);
518 } else {
519 loops = detect_forever_loops(loops);
520 }
521
522 return loops;
523 }
524
525 static int setup_gpu_bench(Target* target, Benchmark* bench, int maxGpuFrameLag) {
526 // First, figure out how many loops it'll take to get a frame up to FLAGS_gpuMs.
527 int loops = bench->shouldLoop() ? FLAGS_loops : 1;
528 if (kAutoTuneLoops == loops) {
529 loops = 1;
530 double elapsed = 0;
531 do {
532 if (1<<30 == loops) {
533 // We're about to wrap. Something's wrong with the bench.
534 loops = 0;
535 break;
536 }
537 loops *= 2;
538 // If the GPU lets frames lag at all, we need to make sure we're timing
539 // _this_ round, not still timing last round.
540 for (int i = 0; i < maxGpuFrameLag; i++) {
541 elapsed = time(loops, bench, target);
542 }
543 } while (elapsed < FLAGS_gpuMs);
544
545 // We've overshot at least a little. Scale back linearly.
546 loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);
547 loops = clamp_loops(loops);
548
549 // Make sure we're not still timing our calibration.
550 target->submitWorkAndSyncCPU();
551 } else {
552 loops = detect_forever_loops(loops);
553 }
554 // Pretty much the same deal as the calibration: do some warmup to make
555 // sure we're timing steady-state pipelined frames.
556 for (int i = 0; i < maxGpuFrameLag; i++) {
557 time(loops, bench, target);
558 }
559
560 return loops;
561 }
562
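// Placeholder context settings for configs that don't use a Ganesh context (e.g. CPU configs).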
563 #define kBogusContextType skgpu::ContextType::kGL
564 #define kBogusContextOverrides GrContextFactory::ContextOverrides::kNone
565
566 static std::optional<Config> create_config(const SkCommandLineConfig* config) {
567 if (const auto* gpuConfig = config->asConfigGpu()) {
568 if (!FLAGS_gpu) {
569 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
570 return std::nullopt;
571 }
572
573 const auto ctxType = gpuConfig->getContextType();
574 const auto ctxOverrides = gpuConfig->getContextOverrides();
575 const auto sampleCount = gpuConfig->getSamples();
576 const auto colorType = gpuConfig->getColorType();
577 if (gpuConfig->getSurfType() != SkCommandLineConfigGpu::SurfType::kDefault) {
578 SkDebugf("This tool only supports the default surface type.");
579 return std::nullopt;
580 }
581
582 GrContextFactory factory(grContextOpts);
583 if (const auto ctx = factory.get(ctxType, ctxOverrides)) {
584 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
585 int supportedSampleCount =
586 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
587 if (sampleCount != supportedSampleCount) {
588 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
589 config->getTag().c_str(),
590 sampleCount);
591 return std::nullopt;
592 }
593 } else {
594 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
595 return std::nullopt;
596 }
597
598 return Config{gpuConfig->getTag(),
599 Benchmark::Backend::kGanesh,
600 colorType,
601 kPremul_SkAlphaType,
602 config->refColorSpace(),
603 sampleCount,
604 ctxType,
605 ctxOverrides,
606 gpuConfig->getSurfaceFlags()};
607 }
608 #if defined(SK_GRAPHITE)
609 if (const auto* gpuConfig = config->asConfigGraphite()) {
610 if (!FLAGS_gpu) {
611 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
612 return std::nullopt;
613 }
614
615 const auto graphiteCtxType = gpuConfig->getContextType();
616 const auto sampleCount = 1; // TODO: gpuConfig->getSamples();
617 const auto colorType = gpuConfig->getColorType();
618
619 using ContextFactory = skiatest::graphite::ContextFactory;
620
621 ContextFactory factory(gTestOptions);
622 skiatest::graphite::ContextInfo ctxInfo = factory.getContextInfo(graphiteCtxType);
623 skgpu::graphite::Context* ctx = ctxInfo.fContext;
624 if (ctx) {
625 // TODO: Add graphite ctx queries for supported sample count by color type.
626 #if 0
627 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
628 int supportedSampleCount =
629 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
630 if (sampleCount != supportedSampleCount) {
631 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
632 config->getTag().c_str(),
633 sampleCount);
634 return std::nullopt;
635 }
636 #else
637 if (sampleCount > 1) {
638 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
639 config->getTag().c_str(),
640 sampleCount);
641 return std::nullopt;
642 }
643 #endif
644 } else {
645 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
646 return std::nullopt;
647 }
648
649 return Config{gpuConfig->getTag(),
650 Benchmark::Backend::kGraphite,
651 colorType,
652 kPremul_SkAlphaType,
653 config->refColorSpace(),
654 sampleCount,
655 graphiteCtxType,
656 kBogusContextOverrides,
657 0};
658 }
659 #endif
660
661 #define CPU_CONFIG(name, backend, color, alpha) \
662 if (config->getBackend().equals(name)) { \
663 if (!FLAGS_cpu) { \
664 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str()); \
665 return std::nullopt; \
666 } \
667 return Config{SkString(name), \
668 Benchmark::backend, \
669 color, \
670 alpha, \
671 config->refColorSpace(), \
672 0, \
673 kBogusContextType, \
674 kBogusContextOverrides, \
675 0}; \
676 }
677
678 CPU_CONFIG("nonrendering", Backend::kNonRendering, kUnknown_SkColorType, kUnpremul_SkAlphaType)
679
680 CPU_CONFIG("a8", Backend::kRaster, kAlpha_8_SkColorType, kPremul_SkAlphaType)
681 CPU_CONFIG("565", Backend::kRaster, kRGB_565_SkColorType, kOpaque_SkAlphaType)
682 CPU_CONFIG("8888", Backend::kRaster, kN32_SkColorType, kPremul_SkAlphaType)
683 CPU_CONFIG("rgba", Backend::kRaster, kRGBA_8888_SkColorType, kPremul_SkAlphaType)
684 CPU_CONFIG("bgra", Backend::kRaster, kBGRA_8888_SkColorType, kPremul_SkAlphaType)
685 CPU_CONFIG("f16", Backend::kRaster, kRGBA_F16_SkColorType, kPremul_SkAlphaType)
686 CPU_CONFIG("srgba", Backend::kRaster, kSRGBA_8888_SkColorType, kPremul_SkAlphaType)
687
688 #undef CPU_CONFIG
689
690 SkDebugf("Unknown config '%s'.\n", config->getTag().c_str());
691 return std::nullopt;
692 }
693
694 // Append all configs that are enabled and supported.
695 void create_configs(TArray<Config>* configs) {
696 SkCommandLineConfigArray array;
697 ParseConfigs(FLAGS_config, &array);
698 for (int i = 0; i < array.size(); ++i) {
699 if (std::optional<Config> config = create_config(array[i].get())) {
700 configs->push_back(*config);
701 }
702 }
703
704 // If no configs were explicitly requested (we just used the defaults), then we're okay.
705 if (array.size() == 0 || FLAGS_config.size() == 0 ||
706 // Otherwise, make sure that all specified configs have been created.
707 array.size() == configs->size()) {
708 return;
709 }
710 exit(1);
711 }
712
713 // disable warning : switch statement contains default but no 'case' labels
714 #if defined _WIN32
715 #pragma warning ( push )
716 #pragma warning ( disable : 4065 )
717 #endif
718
719 // If bench is enabled for config, returns a Target* for it, otherwise nullptr.
720 static Target* is_enabled(Benchmark* bench, const Config& config) {
721 if (!bench->isSuitableFor(config.backend)) {
722 return nullptr;
723 }
724
725 SkImageInfo info =
726 SkImageInfo::Make(bench->getSize(), config.color, config.alpha, config.colorSpace);
727
728 Target* target = nullptr;
729
730 switch (config.backend) {
731 case Benchmark::Backend::kGanesh:
732 target = new GPUTarget(config);
733 break;
734 #if defined(SK_GRAPHITE)
735 case Benchmark::Backend::kGraphite:
736 target = new GraphiteTarget(config);
737 break;
738 #endif
739 default:
740 target = new Target(config);
741 break;
742 }
743
744 if (!target->init(info, bench)) {
745 delete target;
746 return nullptr;
747 }
748 return target;
749 }
750
751 #if defined _WIN32
752 #pragma warning ( pop )
753 #endif
754
755 #ifdef SK_ENABLE_ANDROID_UTILS
756 static bool valid_brd_bench(sk_sp<SkData> encoded, SkColorType colorType, uint32_t sampleSize,
757 uint32_t minOutputSize, int* width, int* height) {
758 auto brd = android::skia::BitmapRegionDecoder::Make(encoded);
759 if (nullptr == brd) {
760 // This indicates that subset decoding is not supported for this image format.
761 return false;
762 }
763
764 if (sampleSize * minOutputSize > (uint32_t) brd->width() || sampleSize * minOutputSize >
765 (uint32_t) brd->height()) {
766 // This indicates that the image is not large enough to decode a
767 // minOutputSize x minOutputSize subset at the given sampleSize.
768 return false;
769 }
770
771 // Set the image width and height. The calling code will use this to choose subsets to decode.
772 *width = brd->width();
773 *height = brd->height();
774 return true;
775 }
776 #endif
777
778 static void cleanup_run(Target* target) {
779 delete target;
780 }
781
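// Each entry in 'paths' is either a file ending in 'ext' or a directory to scan for such files.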
782 static void collect_files(const CommandLineFlags::StringArray& paths,
783 const char* ext,
784 TArray<SkString>* list) {
785 for (int i = 0; i < paths.size(); ++i) {
786 if (SkStrEndsWith(paths[i], ext)) {
787 list->push_back(SkString(paths[i]));
788 } else {
789 SkOSFile::Iter it(paths[i], ext);
790 SkString path;
791 while (it.next(&path)) {
792 list->push_back(SkOSPath::Join(paths[i], path.c_str()));
793 }
794 }
795 }
796 }
797
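// Produces benchmarks one at a time from every enabled source: registered micro benches, GMs,
// SKPs, MSKPs, SVGs, text-blob traces, and image decodes.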
798 class BenchmarkStream {
799 public:
800 BenchmarkStream() : fBenches(BenchRegistry::Head())
801 , fGMs(skiagm::GMRegistry::Head()) {
802 collect_files(FLAGS_skps, ".skp", &fSKPs);
803 collect_files(FLAGS_mskps, ".mskp", &fMSKPs);
804 collect_files(FLAGS_svgs, ".svg", &fSVGs);
805 collect_files(FLAGS_texttraces, ".trace", &fTextBlobTraces);
806
807 if (4 != sscanf(FLAGS_clip[0], "%d,%d,%d,%d",
808 &fClip.fLeft, &fClip.fTop, &fClip.fRight, &fClip.fBottom)) {
809 SkDebugf("Can't parse %s from --clip as an SkIRect.\n", FLAGS_clip[0]);
810 exit(1);
811 }
812
813 for (int i = 0; i < FLAGS_scales.size(); i++) {
814 if (1 != sscanf(FLAGS_scales[i], "%f", &fScales.push_back())) {
815 SkDebugf("Can't parse %s from --scales as an SkScalar.\n", FLAGS_scales[i]);
816 exit(1);
817 }
818 }
819
820 if (2 != sscanf(FLAGS_zoom[0], "%f,%lf", &fZoomMax, &fZoomPeriodMs)) {
821 SkDebugf("Can't parse %s from --zoom as a zoomMax,zoomPeriodMs.\n", FLAGS_zoom[0]);
822 exit(1);
823 }
824
825 // Prepare the images for decoding
826 if (!CommonFlags::CollectImages(FLAGS_images, &fImages)) {
827 exit(1);
828 }
829
830 // Choose the candidate color types for image decoding
831 fColorTypes.push_back(kN32_SkColorType);
832 if (!FLAGS_simpleCodec) {
833 fColorTypes.push_back(kRGB_565_SkColorType);
834 fColorTypes.push_back(kAlpha_8_SkColorType);
835 fColorTypes.push_back(kGray_8_SkColorType);
836 }
837 }
838
839 static sk_sp<SkPicture> ReadPicture(const char* path) {
840 // Not strictly necessary, as it will be checked again later,
841 // but helps to avoid a lot of pointless work if we're going to skip it.
842 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
843 return nullptr;
844 }
845
846 std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(path);
847 if (!stream) {
848 SkDebugf("Could not read %s.\n", path);
849 return nullptr;
850 }
851
852 return SkPicture::MakeFromStream(stream.get());
853 }
854
855 static std::unique_ptr<MSKPPlayer> ReadMSKP(const char* path) {
856 // Not strictly necessary, as it will be checked again later,
857 // but helps to avoid a lot of pointless work if we're going to skip it.
858 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
859 return nullptr;
860 }
861
862 std::unique_ptr<SkStreamSeekable> stream = SkStream::MakeFromFile(path);
863 if (!stream) {
864 SkDebugf("Could not read %s.\n", path);
865 return nullptr;
866 }
867
868 return MSKPPlayer::Make(stream.get());
869 }
870
871 static sk_sp<SkPicture> ReadSVGPicture(const char* path) {
872 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
873 return nullptr;
874 }
875 sk_sp<SkData> data(SkData::MakeFromFileName(path));
876 if (!data) {
877 SkDebugf("Could not read %s.\n", path);
878 return nullptr;
879 }
880
881 #if defined(SK_ENABLE_SVG)
882 SkMemoryStream stream(std::move(data));
883 sk_sp<SkSVGDOM> svgDom = SkSVGDOM::Builder()
884 .setFontManager(ToolUtils::TestFontMgr())
885 .setTextShapingFactory(SkShapers::BestAvailable())
886 .make(stream);
887 if (!svgDom) {
888 SkDebugf("Could not parse %s.\n", path);
889 return nullptr;
890 }
891
892 // Use the intrinsic SVG size if available, otherwise fall back to a default value.
893 static const SkSize kDefaultContainerSize = SkSize::Make(128, 128);
894 if (svgDom->containerSize().isEmpty()) {
895 svgDom->setContainerSize(kDefaultContainerSize);
896 }
897
898 SkPictureRecorder recorder;
899 svgDom->render(recorder.beginRecording(svgDom->containerSize().width(),
900 svgDom->containerSize().height()));
901 return recorder.finishRecordingAsPicture();
902 #else
903 return nullptr;
904 #endif // defined(SK_ENABLE_SVG)
905 }
906
907 Benchmark* next() {
908 std::unique_ptr<Benchmark> bench;
909 do {
910 bench.reset(this->rawNext());
911 if (!bench) {
912 return nullptr;
913 }
914 } while (CommandLineFlags::ShouldSkip(FLAGS_sourceType, fSourceType) ||
915 CommandLineFlags::ShouldSkip(FLAGS_benchType, fBenchType));
916 return bench.release();
917 }
918
919 Benchmark* rawNext() {
920 if (fBenches) {
921 Benchmark* bench = fBenches->get()(nullptr);
922 fBenches = fBenches->next();
923 fSourceType = "bench";
924 fBenchType = "micro";
925 return bench;
926 }
927
928 while (fGMs) {
929 std::unique_ptr<skiagm::GM> gm = fGMs->get()();
930 if (gm->isBazelOnly()) {
931 // We skip Bazel-only GMs because they might not be regular GMs. The Bazel build
932 // reuses the notion of GMs to replace the notion of DM sources of various kinds,
933 // such as codec sources and image generation sources. See comments in the
934 // skiagm::GM::isBazelOnly function declaration for context.
935 continue;
936 }
937 fGMs = fGMs->next();
938 if (gm->runAsBench()) {
939 fSourceType = "gm";
940 fBenchType = "micro";
941 return new GMBench(std::move(gm));
942 }
943 }
944
945 while (fCurrentTextBlobTrace < fTextBlobTraces.size()) {
946 SkString path = fTextBlobTraces[fCurrentTextBlobTrace++];
947 SkString basename = SkOSPath::Basename(path.c_str());
948 static constexpr char kEnding[] = ".trace";
949 if (basename.endsWith(kEnding)) {
950 basename.remove(basename.size() - strlen(kEnding), strlen(kEnding));
951 }
952 fSourceType = "texttrace";
953 fBenchType = "micro";
954 return CreateDiffCanvasBench(
955 SkStringPrintf("SkDiffBench-%s", basename.c_str()),
956 [path](){ return SkStream::MakeFromFile(path.c_str()); });
957 }
958
959 // First add all .skps as RecordingBenches.
960 while (fCurrentRecording < fSKPs.size()) {
961 const SkString& path = fSKPs[fCurrentRecording++];
962 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
963 if (!pic) {
964 continue;
965 }
966 SkString name = SkOSPath::Basename(path.c_str());
967 fSourceType = "skp";
968 fBenchType = "recording";
969 fSKPBytes = static_cast<double>(pic->approximateBytesUsed());
970 fSKPOps = pic->approximateOpCount();
971 return new RecordingBench(name.c_str(), pic.get(), FLAGS_bbh);
972 }
973
974 // Next, add all .skps as DeserializePictureBenches.
975 while (fCurrentDeserialPicture < fSKPs.size()) {
976 const SkString& path = fSKPs[fCurrentDeserialPicture++];
977 sk_sp<SkData> data = SkData::MakeFromFileName(path.c_str());
978 if (!data) {
979 continue;
980 }
981 SkString name = SkOSPath::Basename(path.c_str());
982 fSourceType = "skp";
983 fBenchType = "deserial";
984 fSKPBytes = static_cast<double>(data->size());
985 fSKPOps = 0;
986 return new DeserializePictureBench(name.c_str(), std::move(data));
987 }
988
989 // Then once each for each scale as SKPBenches (playback).
990 while (fCurrentScale < fScales.size()) {
991 while (fCurrentSKP < fSKPs.size()) {
992 const SkString& path = fSKPs[fCurrentSKP++];
993 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
994 if (!pic) {
995 continue;
996 }
997
998 if (FLAGS_bbh) {
999 // The SKP we read off disk doesn't have a BBH. Re-record so it grows one.
1000 SkRTreeFactory factory;
1001 SkPictureRecorder recorder;
1002 pic->playback(recorder.beginRecording(pic->cullRect().width(),
1003 pic->cullRect().height(),
1004 &factory));
1005 pic = recorder.finishRecordingAsPicture();
1006 }
1007 SkString name = SkOSPath::Basename(path.c_str());
1008 fSourceType = "skp";
1009 fBenchType = "playback";
1010 return new SKPBench(name.c_str(), pic.get(), fClip, fScales[fCurrentScale],
1011 FLAGS_loopSKP);
1012 }
1013
1014 while (fCurrentSVG < fSVGs.size()) {
1015 const char* path = fSVGs[fCurrentSVG++].c_str();
1016 if (sk_sp<SkPicture> pic = ReadSVGPicture(path)) {
1017 fSourceType = "svg";
1018 fBenchType = "playback";
1019 return new SKPBench(SkOSPath::Basename(path).c_str(), pic.get(), fClip,
1020 fScales[fCurrentScale], FLAGS_loopSKP);
1021 }
1022 }
1023
1024 fCurrentSKP = 0;
1025 fCurrentSVG = 0;
1026 fCurrentScale++;
1027 }
1028
1029 // Now loop over each skp again if we have an animation
1030 if (fZoomMax != 1.0f && fZoomPeriodMs > 0) {
1031 while (fCurrentAnimSKP < fSKPs.size()) {
1032 const SkString& path = fSKPs[fCurrentAnimSKP];
1033 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
1034 if (!pic) {
1035 fCurrentAnimSKP++;
1036 continue;
1037 }
1038
1039 fCurrentAnimSKP++;
1040 SkString name = SkOSPath::Basename(path.c_str());
1041 sk_sp<SKPAnimationBench::Animation> animation =
1042 SKPAnimationBench::MakeZoomAnimation(fZoomMax, fZoomPeriodMs);
1043 return new SKPAnimationBench(name.c_str(), pic.get(), fClip, std::move(animation),
1044 FLAGS_loopSKP);
1045 }
1046 }
1047
1048 // Read all MSKPs as benches
1049 while (fCurrentMSKP < fMSKPs.size()) {
1050 const SkString& path = fMSKPs[fCurrentMSKP++];
1051 std::unique_ptr<MSKPPlayer> player = ReadMSKP(path.c_str());
1052 if (!player) {
1053 continue;
1054 }
1055 SkString name = SkOSPath::Basename(path.c_str());
1056 fSourceType = "mskp";
1057 fBenchType = "mskp";
1058 return new MSKPBench(std::move(name), std::move(player));
1059 }
1060
1061 for (; fCurrentCodec < fImages.size(); fCurrentCodec++) {
1062 fSourceType = "image";
1063 fBenchType = "skcodec";
1064 const SkString& path = fImages[fCurrentCodec];
1065 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1066 continue;
1067 }
1068 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1069 std::unique_ptr<SkCodec> codec(SkCodec::MakeFromData(encoded));
1070 if (!codec) {
1071 // Nothing to time.
1072 SkDebugf("Cannot find codec for %s\n", path.c_str());
1073 continue;
1074 }
1075
1076 while (fCurrentColorType < fColorTypes.size()) {
1077 const SkColorType colorType = fColorTypes[fCurrentColorType];
1078
1079 SkAlphaType alphaType = codec->getInfo().alphaType();
1080 if (FLAGS_simpleCodec) {
1081 if (kUnpremul_SkAlphaType == alphaType) {
1082 alphaType = kPremul_SkAlphaType;
1083 }
1084
1085 fCurrentColorType++;
1086 } else {
1087 switch (alphaType) {
1088 case kOpaque_SkAlphaType:
1089 // We only need to test one alpha type (opaque).
1090 fCurrentColorType++;
1091 break;
1092 case kUnpremul_SkAlphaType:
1093 case kPremul_SkAlphaType:
1094 if (0 == fCurrentAlphaType) {
1095 // Test unpremul first.
1096 alphaType = kUnpremul_SkAlphaType;
1097 fCurrentAlphaType++;
1098 } else {
1099 // Test premul.
1100 alphaType = kPremul_SkAlphaType;
1101 fCurrentAlphaType = 0;
1102 fCurrentColorType++;
1103 }
1104 break;
1105 default:
1106 SkASSERT(false);
1107 fCurrentColorType++;
1108 break;
1109 }
1110 }
1111
1112 // Make sure we can decode to this color type and alpha type.
1113 SkImageInfo info =
1114 codec->getInfo().makeColorType(colorType).makeAlphaType(alphaType);
1115 const size_t rowBytes = info.minRowBytes();
1116 SkAutoMalloc storage(info.computeByteSize(rowBytes));
1117
1118 const SkCodec::Result result = codec->getPixels(
1119 info, storage.get(), rowBytes);
1120 switch (result) {
1121 case SkCodec::kSuccess:
1122 case SkCodec::kIncompleteInput:
1123 return new CodecBench(SkOSPath::Basename(path.c_str()),
1124 encoded.get(), colorType, alphaType);
1125 case SkCodec::kInvalidConversion:
1126 // This is okay. Not all conversions are valid.
1127 break;
1128 default:
1129 // This represents some sort of failure.
1130 SkASSERT(false);
1131 break;
1132 }
1133 }
1134 fCurrentColorType = 0;
1135 }
1136
1137 // Run AndroidCodecBenches
1138 const int sampleSizes[] = { 2, 4, 8 };
1139 for (; fCurrentAndroidCodec < fImages.size(); fCurrentAndroidCodec++) {
1140 fSourceType = "image";
1141 fBenchType = "skandroidcodec";
1142
1143 const SkString& path = fImages[fCurrentAndroidCodec];
1144 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1145 continue;
1146 }
1147 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1148 std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::MakeFromData(encoded));
1149 if (!codec) {
1150 // Nothing to time.
1151 SkDebugf("Cannot find codec for %s\n", path.c_str());
1152 continue;
1153 }
1154
1155 while (fCurrentSampleSize < (int) std::size(sampleSizes)) {
1156 int sampleSize = sampleSizes[fCurrentSampleSize];
1157 fCurrentSampleSize++;
1158 if (10 * sampleSize > std::min(codec->getInfo().width(), codec->getInfo().height())) {
1159 // Avoid benchmarking scaled decodes of already small images.
1160 break;
1161 }
1162
1163 return new AndroidCodecBench(SkOSPath::Basename(path.c_str()),
1164 encoded.get(), sampleSize);
1165 }
1166 fCurrentSampleSize = 0;
1167 }
1168
1169 #ifdef SK_ENABLE_ANDROID_UTILS
1170 // Run the BRDBenches
1171 // We intend to create benchmarks that model the use cases in
1172 // android/libraries/social/tiledimage. In this library, an image is decoded in 512x512
1173 // tiles. The image can be translated freely, so the location of a tile may be anywhere in
1174 // the image. For that reason, we will benchmark decodes in five representative locations
1175 // in the image. Additionally, this use case utilizes power of two scaling, so we will
1176 // test on power of two sample sizes. The output tile is always 512x512, so, when a
1177 // sampleSize is used, the size of the subset that is decoded is always
1178 // (sampleSize*512)x(sampleSize*512).
1179 // There are a few good reasons to only test on power of two sample sizes at this time:
1180 // All use cases we are aware of only scale by powers of two.
1181 // PNG decodes use the indicated sampling strategy regardless of the sample size, so
1182 // these tests are sufficient to provide good coverage of our scaling options.
1183 const uint32_t brdSampleSizes[] = { 1, 2, 4, 8, 16 };
1184 const uint32_t minOutputSize = 512;
1185 for (; fCurrentBRDImage < fImages.size(); fCurrentBRDImage++) {
1186 fSourceType = "image";
1187 fBenchType = "BRD";
1188
1189 const SkString& path = fImages[fCurrentBRDImage];
1190 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1191 continue;
1192 }
1193
1194 while (fCurrentColorType < fColorTypes.size()) {
1195 while (fCurrentSampleSize < (int) std::size(brdSampleSizes)) {
1196 while (fCurrentSubsetType <= kLastSingle_SubsetType) {
1197
1198 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1199 const SkColorType colorType = fColorTypes[fCurrentColorType];
1200 uint32_t sampleSize = brdSampleSizes[fCurrentSampleSize];
1201 int currentSubsetType = fCurrentSubsetType++;
1202
1203 int width = 0;
1204 int height = 0;
1205 if (!valid_brd_bench(encoded, colorType, sampleSize, minOutputSize,
1206 &width, &height)) {
1207 break;
1208 }
1209
1210 SkString basename = SkOSPath::Basename(path.c_str());
1211 SkIRect subset;
1212 const uint32_t subsetSize = sampleSize * minOutputSize;
1213 switch (currentSubsetType) {
1214 case kTopLeft_SubsetType:
1215 basename.append("_TopLeft");
1216 subset = SkIRect::MakeXYWH(0, 0, subsetSize, subsetSize);
1217 break;
1218 case kTopRight_SubsetType:
1219 basename.append("_TopRight");
1220 subset = SkIRect::MakeXYWH(width - subsetSize, 0, subsetSize,
1221 subsetSize);
1222 break;
1223 case kMiddle_SubsetType:
1224 basename.append("_Middle");
1225 subset = SkIRect::MakeXYWH((width - subsetSize) / 2,
1226 (height - subsetSize) / 2, subsetSize, subsetSize);
1227 break;
1228 case kBottomLeft_SubsetType:
1229 basename.append("_BottomLeft");
1230 subset = SkIRect::MakeXYWH(0, height - subsetSize, subsetSize,
1231 subsetSize);
1232 break;
1233 case kBottomRight_SubsetType:
1234 basename.append("_BottomRight");
1235 subset = SkIRect::MakeXYWH(width - subsetSize,
1236 height - subsetSize, subsetSize, subsetSize);
1237 break;
1238 default:
1239 SkASSERT(false);
1240 }
1241
1242 return new BitmapRegionDecoderBench(basename.c_str(), encoded.get(),
1243 colorType, sampleSize, subset);
1244 }
1245 fCurrentSubsetType = 0;
1246 fCurrentSampleSize++;
1247 }
1248 fCurrentSampleSize = 0;
1249 fCurrentColorType++;
1250 }
1251 fCurrentColorType = 0;
1252 }
1253 #endif // SK_ENABLE_ANDROID_UTILS
1254
1255 return nullptr;
1256 }
1257
1258 void fillCurrentOptions(NanoJSONResultsWriter& log) const {
1259 log.appendCString("source_type", fSourceType);
1260 log.appendCString("bench_type", fBenchType);
1261 if (0 == strcmp(fSourceType, "skp")) {
1262 log.appendString("clip",
1263 SkStringPrintf("%d %d %d %d", fClip.fLeft, fClip.fTop,
1264 fClip.fRight, fClip.fBottom));
1265 SkASSERT_RELEASE(fCurrentScale < fScales.size()); // debugging paranoia
1266 log.appendString("scale", SkStringPrintf("%.2g", fScales[fCurrentScale]));
1267 }
1268 }
1269
1270 void fillCurrentMetrics(NanoJSONResultsWriter& log) const {
1271 if (0 == strcmp(fBenchType, "recording")) {
1272 log.appendMetric("bytes", fSKPBytes);
1273 log.appendMetric("ops", fSKPOps);
1274 }
1275 }
1276
1277 private:
1278 #ifdef SK_ENABLE_ANDROID_UTILS
1279 enum SubsetType {
1280 kTopLeft_SubsetType = 0,
1281 kTopRight_SubsetType = 1,
1282 kMiddle_SubsetType = 2,
1283 kBottomLeft_SubsetType = 3,
1284 kBottomRight_SubsetType = 4,
1285 kTranslate_SubsetType = 5,
1286 kZoom_SubsetType = 6,
1287 kLast_SubsetType = kZoom_SubsetType,
1288 kLastSingle_SubsetType = kBottomRight_SubsetType,
1289 };
1290 #endif
1291
1292 const BenchRegistry* fBenches;
1293 const skiagm::GMRegistry* fGMs;
1294 SkIRect fClip;
1295 TArray<SkScalar> fScales;
1296 TArray<SkString> fSKPs;
1297 TArray<SkString> fMSKPs;
1298 TArray<SkString> fSVGs;
1299 TArray<SkString> fTextBlobTraces;
1300 TArray<SkString> fImages;
1301 TArray<SkColorType, true> fColorTypes;
1302 SkScalar fZoomMax;
1303 double fZoomPeriodMs;
1304
1305 double fSKPBytes, fSKPOps;
1306
1307 const char* fSourceType; // What we're benching: bench, GM, SKP, ...
1308 const char* fBenchType; // How we bench it: micro, recording, playback, ...
1309 int fCurrentRecording = 0;
1310 int fCurrentDeserialPicture = 0;
1311 int fCurrentMSKP = 0;
1312 int fCurrentScale = 0;
1313 int fCurrentSKP = 0;
1314 int fCurrentSVG = 0;
1315 int fCurrentTextBlobTrace = 0;
1316 int fCurrentCodec = 0;
1317 int fCurrentAndroidCodec = 0;
1318 #ifdef SK_ENABLE_ANDROID_UTILS
1319 int fCurrentBRDImage = 0;
1320 int fCurrentSubsetType = 0;
1321 #endif
1322 int fCurrentColorType = 0;
1323 int fCurrentAlphaType = 0;
1324 int fCurrentSampleSize = 0;
1325 int fCurrentAnimSKP = 0;
1326 };
1327
1328 // Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
1329 // This prints something every once in a while so that it knows we're still working.
1330 static void start_keepalive() {
1331 static std::thread* intentionallyLeaked = new std::thread([]{
1332 for (;;) {
1333 static const int kSec = 1200;
1334 #if defined(SK_BUILD_FOR_WIN)
1335 Sleep(kSec * 1000);
1336 #else
1337 sleep(kSec);
1338 #endif
1339 SkDebugf("\nBenchmarks still running...\n");
1340 }
1341 });
1342 (void)intentionallyLeaked;
1343 SK_INTENTIONALLY_LEAKED(intentionallyLeaked);
1344 }
1345
1346 class NanobenchShaderErrorHandler : public GrContextOptions::ShaderErrorHandler {
1347 void compileError(const char* shader, const char* errors) override {
1348 // Nanobench should abort if any shader can't compile. Failure is much better than
1349 // reporting meaningless performance metrics.
1350 std::string message = SkShaderUtils::BuildShaderErrorMessage(shader, errors);
1351 SK_ABORT("\n%s", message.c_str());
1352 }
1353 };
1354
1355 int main(int argc, char** argv) {
1356 CommandLineFlags::Parse(argc, argv);
1357
1358 initializeEventTracingForTools();
1359
1360 #if defined(SK_BUILD_FOR_IOS)
1361 cd_Documents();
1362 #endif
1363 SetupCrashHandler();
1364 if (FLAGS_runtimeCPUDetection) {
1365 SkGraphics::Init();
1366 }
1367
1368 // Our benchmarks only currently decode .png or .jpg files
1369 SkCodecs::Register(SkPngDecoder::Decoder());
1370 SkCodecs::Register(SkJpegDecoder::Decoder());
1371
1372 SkTaskGroup::Enabler enabled(FLAGS_threads);
1373
1374 CommonFlags::SetCtxOptions(&grContextOpts);
1375
1376 #if defined(SK_GRAPHITE)
1377 CommonFlags::SetTestOptions(&gTestOptions);
1378 #endif
1379
1380 NanobenchShaderErrorHandler errorHandler;
1381 grContextOpts.fShaderErrorHandler = &errorHandler;
1382
1383 if (kAutoTuneLoops != FLAGS_loops) {
1384 FLAGS_samples = 1;
1385 FLAGS_gpuFrameLag = 0;
1386 }
1387
1388 if (!FLAGS_writePath.isEmpty()) {
1389 SkDebugf("Writing files to %s.\n", FLAGS_writePath[0]);
1390 if (!sk_mkdir(FLAGS_writePath[0])) {
1391 SkDebugf("Could not create %s. Files won't be written.\n", FLAGS_writePath[0]);
1392 FLAGS_writePath.set(0, nullptr);
1393 }
1394 }
1395
1396 std::unique_ptr<SkWStream> logStream(new SkNullWStream);
1397 if (!FLAGS_outResultsFile.isEmpty()) {
1398 #if defined(SK_RELEASE)
1399 logStream.reset(new SkFILEWStream(FLAGS_outResultsFile[0]));
1400 #else
1401 SkDebugf("I'm ignoring --outResultsFile because this is a Debug build.");
1402 return 1;
1403 #endif
1404 }
1405 NanoJSONResultsWriter log(logStream.get(), SkJSONWriter::Mode::kPretty);
1406 log.beginObject(); // root
1407
1408 if (1 == FLAGS_properties.size() % 2) {
1409 SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
1410 return 1;
1411 }
1412 for (int i = 1; i < FLAGS_properties.size(); i += 2) {
1413 log.appendCString(FLAGS_properties[i-1], FLAGS_properties[i]);
1414 }
1415
1416 if (1 == FLAGS_key.size() % 2) {
1417 SkDebugf("ERROR: --key must be passed with an even number of arguments.\n");
1418 return 1;
1419 }
1420 if (FLAGS_key.size()) {
1421 log.beginObject("key");
1422 for (int i = 1; i < FLAGS_key.size(); i += 2) {
1423 log.appendCString(FLAGS_key[i - 1], FLAGS_key[i]);
1424 }
1425 log.endObject(); // key
1426 }
1427
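// Estimate timer overhead once up front; setup_cpu_bench() uses it to pick loop counts.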
1428 const double overhead = estimate_timer_overhead();
1429 if (!FLAGS_quiet && !FLAGS_csv) {
1430 SkDebugf("Timer overhead: %s\n", HUMANIZE(overhead));
1431 }
1432
1433 TArray<double> samples;
1434
1435 if (kAutoTuneLoops != FLAGS_loops) {
1436 SkDebugf("Fixed number of loops; times would only be misleading so we won't print them.\n");
1437 } else if (FLAGS_quiet) {
1438 SkDebugf("! -> high variance, ? -> moderate variance\n");
1439 SkDebugf(" micros \tbench\n");
1440 } else if (FLAGS_csv) {
1441 SkDebugf("min,median,mean,max,stddev,config,bench\n");
1442 } else if (FLAGS_ms) {
1443 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\tsamples\tconfig\tbench\n");
1444 } else {
1445 SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\t%-*s\tconfig\tbench\n",
1446 FLAGS_samples, "samples");
1447 }
1448
1449 GrRecordingContextPriv::DMSAAStats combinedDMSAAStats;
1450
1451 TArray<Config> configs;
1452 create_configs(&configs);
1453
1454 if (FLAGS_keepAlive) {
1455 start_keepalive();
1456 }
1457
1458 gSkForceRasterPipelineBlitter = FLAGS_forceRasterPipelineHP || FLAGS_forceRasterPipeline;
1459 gForceHighPrecisionRasterPipeline = FLAGS_forceRasterPipelineHP;
1460
1461 // The SkSL memory benchmark must run before any GPU painting occurs. SkSL allocates memory for
1462 // its modules the first time they are accessed, and this test is trying to measure the size of
1463 // those allocations. If a paint has already occurred, some modules will have already been
1464 // loaded, so we won't be able to capture a delta for them.
1465 log.beginObject("results");
1466 RunSkSLModuleBenchmarks(&log);
1467
1468 int runs = 0;
1469 BenchmarkStream benchStream;
1470 AutoreleasePool pool;
1471 while (Benchmark* b = benchStream.next()) {
1472 std::unique_ptr<Benchmark> bench(b);
1473 if (CommandLineFlags::ShouldSkip(FLAGS_match, bench->getUniqueName())) {
1474 continue;
1475 }
1476
1477 if (!configs.empty()) {
1478 log.beginBench(
1479 bench->getUniqueName(), bench->getSize().width(), bench->getSize().height());
1480 bench->delayedSetup();
1481 }
1482 for (int i = 0; i < configs.size(); ++i) {
1483 Target* target = is_enabled(b, configs[i]);
1484 if (!target) {
1485 continue;
1486 }
1487
1488 // During HWUI output this canvas may be nullptr.
1489 SkCanvas* canvas = target->getCanvas();
1490 const char* config = target->config.name.c_str();
1491
1492 if (FLAGS_pre_log || FLAGS_dryRun) {
1493 SkDebugf("Running %s\t%s\n"
1494 , bench->getUniqueName()
1495 , config);
1496 if (FLAGS_dryRun) {
1497 continue;
1498 }
1499 }
1500
1501 if (FLAGS_purgeBetweenBenches) {
1502 SkGraphics::PurgeAllCaches();
1503 }
1504
1505 if (FLAGS_splitPerfettoTracesByBenchmark) {
1506 TRACE_EVENT_API_NEW_TRACE_SECTION(TRACE_STR_COPY(bench->getUniqueName()));
1507 }
1508 TRACE_EVENT2("skia", "Benchmark", "name", TRACE_STR_COPY(bench->getUniqueName()),
1509 "config", TRACE_STR_COPY(config));
1510
1511 target->setup();
1512 bench->perCanvasPreDraw(canvas);
1513
1514 int maxFrameLag;
1515 int loops = target->needsFrameTiming(&maxFrameLag)
1516 ? setup_gpu_bench(target, bench.get(), maxFrameLag)
1517 : setup_cpu_bench(overhead, target, bench.get());
1518
1519 if (kFailedLoops == loops) {
1520 // Can't be timed. A warning note has already been printed.
1521 cleanup_run(target);
1522 continue;
1523 }
1524
1525 if (runs == 0 && FLAGS_ms < 1000) {
1526 // Run the first bench for 1000ms to warm up the nanobench if FLAGS_ms < 1000.
1527 // Otherwise, the first few benches' measurements will be inaccurate.
1528 auto stop = now_ms() + 1000;
1529 do {
1530 time(loops, bench.get(), target);
1531 pool.drain();
1532 } while (now_ms() < stop);
1533 }
1534
1535 if (FLAGS_ms) {
1536 samples.clear();
1537 auto stop = now_ms() + FLAGS_ms;
1538 do {
1539 samples.push_back(time(loops, bench.get(), target) / loops);
1540 pool.drain();
1541 } while (now_ms() < stop);
1542 } else {
1543 samples.reset(FLAGS_samples);
1544 for (int s = 0; s < FLAGS_samples; s++) {
1545 samples[s] = time(loops, bench.get(), target) / loops;
1546 pool.drain();
1547 }
1548 }
1549
1550 // Scale each result to the benchmark's own units, time/unit.
1551 for (double& sample : samples) {
1552 sample *= (1.0 / bench->getUnits());
1553 }
1554
1555 TArray<SkString> keys;
1556 TArray<double> values;
1557 if (configs[i].backend == Benchmark::Backend::kGanesh) {
1558 if (FLAGS_gpuStatsDump) {
1559 // TODO cache stats
1560 bench->getGpuStats(canvas, &keys, &values);
1561 }
1562 if (FLAGS_dmsaaStatsDump && bench->getDMSAAStats(canvas->recordingContext())) {
1563 const auto& dmsaaStats = canvas->recordingContext()->priv().dmsaaStats();
1564 dmsaaStats.dumpKeyValuePairs(&keys, &values);
1565 dmsaaStats.dump();
1566 combinedDMSAAStats.merge(dmsaaStats);
1567 }
1568 }
1569
1570 bench->perCanvasPostDraw(canvas);
1571
1572 if (Benchmark::Backend::kNonRendering != target->config.backend &&
1573 !FLAGS_writePath.isEmpty() && FLAGS_writePath[0]) {
1574 SkString pngFilename = SkOSPath::Join(FLAGS_writePath[0], config);
1575 pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getUniqueName());
1576 pngFilename.append(".png");
1577 write_canvas_png(target, pngFilename);
1578 }
1579
1580 // Building stats.plot often shows up in profiles,
1581 // so skip building it when we're not going to print it anyway.
1582 const bool want_plot = !FLAGS_quiet && !FLAGS_ms;
1583
1584 Stats stats(samples, want_plot);
1585 log.beginObject(config);
1586
1587 log.beginObject("options");
1588 log.appendCString("name", bench->getName());
1589 benchStream.fillCurrentOptions(log);
1590 log.endObject(); // options
1591
1592 // Metrics
1593 log.appendMetric("min_ms", stats.min);
1594 log.appendMetric("min_ratio", sk_ieee_double_divide(stats.median, stats.min));
1595 log.beginArray("samples");
1596 for (double sample : samples) {
1597 log.appendDoubleDigits(sample, 16);
1598 }
1599 log.endArray(); // samples
1600 benchStream.fillCurrentMetrics(log);
1601 if (!keys.empty()) {
1602 // dump to json, only SKPBench currently returns valid keys / values
1603 SkASSERT(keys.size() == values.size());
1604 for (int j = 0; j < keys.size(); j++) {
1605 log.appendMetric(keys[j].c_str(), values[j]);
1606 }
1607 }
1608
1609 log.endObject(); // config
1610
1611 if (runs++ % FLAGS_flushEvery == 0) {
1612 log.flush();
1613 }
1614
1615 if (kAutoTuneLoops != FLAGS_loops) {
1616 if (configs.size() == 1) {
1617 config = ""; // Only print the config if we run the same bench on more than one.
1618 }
1619 SkDebugf("%4d/%-4dMB\t%s\t%s "
1620 , sk_tools::getCurrResidentSetSizeMB()
1621 , sk_tools::getMaxResidentSetSizeMB()
1622 , bench->getUniqueName()
1623 , config);
1624 SkDebugf("\n");
1625 } else if (FLAGS_quiet) {
1626 const char* mark = " ";
1627 const double stddev_percent =
1628 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1629 if (stddev_percent > 5) mark = "?";
1630 if (stddev_percent > 10) mark = "!";
1631
1632 SkDebugf("%10.2f %s\t%s\t%s\n",
1633 stats.median*1e3, mark, bench->getUniqueName(), config);
1634 } else if (FLAGS_csv) {
1635 const double stddev_percent =
1636 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1637 SkDebugf("%g,%g,%g,%g,%g,%s,%s\n"
1638 , stats.min
1639 , stats.median
1640 , stats.mean
1641 , stats.max
1642 , stddev_percent
1643 , config
1644 , bench->getUniqueName()
1645 );
1646 } else {
1647 const double stddev_percent =
1648 sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
1649 SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n"
1650 , sk_tools::getCurrResidentSetSizeMB()
1651 , sk_tools::getMaxResidentSetSizeMB()
1652 , loops
1653 , HUMANIZE(stats.min)
1654 , HUMANIZE(stats.median)
1655 , HUMANIZE(stats.mean)
1656 , HUMANIZE(stats.max)
1657 , stddev_percent
1658 , FLAGS_ms ? to_string(samples.size()).c_str() : stats.plot.c_str()
1659 , config
1660 , bench->getUniqueName()
1661 );
1662 }
1663
1664 if (FLAGS_gpuStats && Benchmark::Backend::kGanesh == configs[i].backend) {
1665 target->dumpStats();
1666 }
1667
1668 if (FLAGS_verbose) {
1669 SkDebugf("Samples: ");
1670 for (int j = 0; j < samples.size(); j++) {
1671 SkDebugf("%s ", HUMANIZE(samples[j]));
1672 }
1673 SkDebugf("%s\n", bench->getUniqueName());
1674 }
1675 cleanup_run(target);
1676 pool.drain();
1677 }
1678 if (!configs.empty()) {
1679 log.endBench();
1680 }
1681 }
1682
1683 if (FLAGS_dmsaaStatsDump) {
1684 SkDebugf("<<Total Combined DMSAA Stats>>\n");
1685 combinedDMSAAStats.dump();
1686 }
1687
1688 SkGraphics::PurgeAllCaches();
1689
1690 log.beginBench("memory_usage", 0, 0);
1691 log.beginObject("meta"); // config
1692 log.appendS32("max_rss_mb", sk_tools::getMaxResidentSetSizeMB());
1693 log.endObject(); // config
1694 log.endBench();
1695
1696 log.endObject(); // results
1697 log.endObject(); // root
1698 log.flush();
1699
1700 return 0;
1701 }
1702