1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <ctype.h>
9
10 #include "bench/nanobench.h"
11
12 #include "bench/AndroidCodecBench.h"
13 #include "bench/Benchmark.h"
14 #include "bench/CodecBench.h"
15 #include "bench/CodecBenchPriv.h"
16 #include "bench/GMBench.h"
17 #include "bench/MSKPBench.h"
18 #include "bench/RecordingBench.h"
19 #include "bench/ResultsWriter.h"
20 #include "bench/SKPAnimationBench.h"
21 #include "bench/SKPBench.h"
22 #include "bench/SkGlyphCacheBench.h"
23 #include "bench/SkSLBench.h"
24 #include "include/codec/SkAndroidCodec.h"
25 #include "include/codec/SkCodec.h"
26 #include "include/codec/SkJpegDecoder.h"
27 #include "include/codec/SkPngDecoder.h"
28 #include "include/core/SkBBHFactory.h"
29 #include "include/core/SkBitmap.h"
30 #include "include/core/SkCanvas.h"
31 #include "include/core/SkData.h"
32 #include "include/core/SkGraphics.h"
33 #include "include/core/SkPictureRecorder.h"
34 #include "include/core/SkString.h"
35 #include "include/core/SkSurface.h"
36 #include "include/encode/SkPngEncoder.h"
37 #include "include/private/base/SkMacros.h"
38 #include "src/base/SkAutoMalloc.h"
39 #include "src/base/SkLeanWindows.h"
40 #include "src/base/SkTime.h"
41 #include "src/core/SkColorSpacePriv.h"
42 #include "src/core/SkOSFile.h"
43 #include "src/core/SkTaskGroup.h"
44 #include "src/core/SkTraceEvent.h"
45 #include "src/utils/SkJSONWriter.h"
46 #include "src/utils/SkOSPath.h"
47 #include "src/utils/SkShaderUtils.h"
48 #include "tools/AutoreleasePool.h"
49 #include "tools/CrashHandler.h"
50 #include "tools/MSKPPlayer.h"
51 #include "tools/ProcStats.h"
52 #include "tools/Stats.h"
53 #include "tools/ToolUtils.h"
54 #include "tools/flags/CommonFlags.h"
55 #include "tools/flags/CommonFlagsConfig.h"
56 #include "tools/flags/CommonFlagsGanesh.h"
57 #include "tools/fonts/FontToolUtils.h"
58 #include "tools/ios_utils.h"
59 #include "tools/trace/EventTracingPriv.h"
60 #include "tools/trace/SkDebugfTracer.h"
61
62 #if defined(SK_ENABLE_SVG)
63 #include "modules/skshaper/utils/FactoryHelpers.h"
64 #include "modules/svg/include/SkSVGDOM.h"
65 #include "modules/svg/include/SkSVGNode.h"
66 #endif
67
68 #ifdef SK_ENABLE_ANDROID_UTILS
69 #include "bench/BitmapRegionDecoderBench.h"
70 #include "client_utils/android/BitmapRegionDecoder.h"
71 #endif
72
73 #if defined(SK_GRAPHITE)
74 #include "include/gpu/graphite/Context.h"
75 #include "include/gpu/graphite/Recorder.h"
76 #include "include/gpu/graphite/Recording.h"
77 #include "include/gpu/graphite/Surface.h"
78 #include "tools/flags/CommonFlagsGraphite.h"
79 #include "tools/graphite/ContextFactory.h"
80 #include "tools/graphite/GraphiteTestContext.h"
81 #include "tools/graphite/GraphiteToolUtils.h"
82 #endif
83
84 #include <cinttypes>
85 #include <memory>
86 #include <optional>
87 #include <stdlib.h>
88 #include <thread>
89
90 extern bool gSkForceRasterPipelineBlitter;
91 extern bool gForceHighPrecisionRasterPipeline;
92
93 #ifndef SK_BUILD_FOR_WIN
94 #include <unistd.h>
95 #endif
96
97 #include "include/gpu/ganesh/GrDirectContext.h"
98 #include "include/gpu/ganesh/SkSurfaceGanesh.h"
99 #include "src/gpu/ganesh/GrCaps.h"
100 #include "src/gpu/ganesh/GrDirectContextPriv.h"
101 #include "src/gpu/ganesh/SkGr.h"
102 #include "tools/gpu/GrContextFactory.h"
103
104 using namespace skia_private;
105
106 using sk_gpu_test::ContextInfo;
107 using sk_gpu_test::GrContextFactory;
108 using sk_gpu_test::TestContext;
109
// Ganesh context options shared by every GrContextFactory this file creates
// (see GPUTarget::init and create_config).
GrContextOptions grContextOpts;

#if defined(SK_GRAPHITE)
// Graphite analogue of grContextOpts (see GraphiteTarget::init and create_config).
skiatest::graphite::TestOptions gTestOptions;
#endif

// Sentinel value for --loops meaning "auto-tune the loop count per bench".
static const int kAutoTuneLoops = 0;
117
loops_help_txt()118 static SkString loops_help_txt() {
119 SkString help;
120 help.printf("Number of times to run each bench. Set this to %d to auto-"
121 "tune for each bench. Timings are only reported when auto-tuning.",
122 kAutoTuneLoops);
123 return help;
124 }
125
to_string(int n)126 static SkString to_string(int n) {
127 SkString str;
128 str.appendS32(n);
129 return str;
130 }
131
132 static DEFINE_int(loops, kAutoTuneLoops, loops_help_txt().c_str());
133
134 static DEFINE_int(samples, 10, "Number of samples to measure for each bench.");
135 static DEFINE_int(ms, 0, "If >0, run each bench for this many ms instead of obeying --samples.");
136 static DEFINE_int(overheadLoops, 100000, "Loops to estimate timer overhead.");
137 static DEFINE_double(overheadGoal, 0.0001,
138 "Loop until timer overhead is at most this fraction of our measurments.");
139 static DEFINE_double(gpuMs, 5, "Target bench time in millseconds for GPU.");
140 static DEFINE_int(gpuFrameLag, 5,
141 "If unknown, estimated maximum number of frames GPU allows to lag.");
142
143 static DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
144 static DEFINE_int(maxCalibrationAttempts, 3,
145 "Try up to this many times to guess loops for a bench, or skip the bench.");
146 static DEFINE_int(maxLoops, 1000000, "Never run a bench more times than this.");
147 static DEFINE_string(clip, "0,0,1000,1000", "Clip for SKPs.");
148 static DEFINE_string(scales, "1.0", "Space-separated scales for SKPs.");
149 static DEFINE_string(zoom, "1.0,0",
150 "Comma-separated zoomMax,zoomPeriodMs factors for a periodic SKP zoom "
151 "function that ping-pongs between 1.0 and zoomMax.");
152 static DEFINE_bool(bbh, true, "Build a BBH for SKPs?");
153 static DEFINE_bool(loopSKP, true, "Loop SKPs like we do for micro benches?");
154 static DEFINE_int(flushEvery, 10, "Flush --outResultsFile every Nth run.");
155 static DEFINE_bool(gpuStats, false, "Print GPU stats after each gpu benchmark?");
156 static DEFINE_bool(gpuStatsDump, false, "Dump GPU stats after each benchmark to json");
157 static DEFINE_bool(dmsaaStatsDump, false, "Dump DMSAA stats after each benchmark to json");
158 static DEFINE_bool(keepAlive, false, "Print a message every so often so that we don't time out");
159 static DEFINE_bool(csv, false, "Print status in CSV format");
160 static DEFINE_string(sourceType, "",
161 "Apply usual --match rules to source type: bench, gm, skp, image, etc.");
162 static DEFINE_string(benchType, "",
163 "Apply usual --match rules to bench type: micro, recording, "
164 "piping, playback, skcodec, etc.");
165
166 static DEFINE_bool(forceRasterPipeline, false, "sets gSkForceRasterPipelineBlitter");
167 static DEFINE_bool(forceRasterPipelineHP, false, "sets gSkForceRasterPipelineBlitter and gForceHighPrecisionRasterPipeline");
168
169 static DEFINE_bool2(pre_log, p, false,
170 "Log before running each test. May be incomprehensible when threading");
171
172 static DEFINE_bool(cpu, true, "Run CPU-bound work?");
173 static DEFINE_bool(gpu, true, "Run GPU-bound work?");
174 static DEFINE_bool(dryRun, false,
175 "just print the tests that would be run, without actually running them.");
176 static DEFINE_string(images, "",
177 "List of images and/or directories to decode. A directory with no images"
178 " is treated as a fatal error.");
179 static DEFINE_bool(simpleCodec, false,
180 "Runs of a subset of the codec tests, always N32, Premul or Opaque");
181
182 static DEFINE_string2(match, m, nullptr,
183 "[~][^]substring[$] [...] of name to run.\n"
184 "Multiple matches may be separated by spaces.\n"
185 "~ causes a matching name to always be skipped\n"
186 "^ requires the start of the name to match\n"
187 "$ requires the end of the name to match\n"
188 "^ and $ requires an exact match\n"
189 "If a name does not match any list entry,\n"
190 "it is skipped unless some list entry starts with ~");
191
192 static DEFINE_bool2(quiet, q, false, "if true, don't print status updates.");
193 static DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver.");
194
195
196 static DEFINE_string(skps, "skps", "Directory to read skps from.");
197 static DEFINE_string(mskps, "mskps", "Directory to read mskps from.");
198 static DEFINE_string(svgs, "", "Directory to read SVGs from, or a single SVG file.");
199 static DEFINE_string(texttraces, "", "Directory to read TextBlobTrace files from.");
200
201 static DEFINE_int_2(threads, j, -1,
202 "Run threadsafe tests on a threadpool with this many extra threads, "
203 "defaulting to one extra thread per core.");
204
205 static DEFINE_string2(writePath, w, "", "If set, write bitmaps here as .pngs.");
206
207 static DEFINE_string(key, "",
208 "Space-separated key/value pairs to add to JSON identifying this builder.");
209 static DEFINE_string(properties, "",
210 "Space-separated key/value pairs to add to JSON identifying this run.");
211
212 static DEFINE_bool(purgeBetweenBenches, false,
213 "Call SkGraphics::PurgeAllCaches() between each benchmark?");
214
215 static DEFINE_bool(splitPerfettoTracesByBenchmark, true,
216 "Create separate perfetto trace files for each benchmark?\n"
217 "Will only take effect if perfetto tracing is enabled. See --trace.");
218
219 static DEFINE_bool(runtimeCPUDetection, true, "Skip runtime CPU detection and optimization");
220
now_ms()221 static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
222
humanize(double ms)223 static SkString humanize(double ms) {
224 if (FLAGS_verbose) return SkStringPrintf("%" PRIu64, (uint64_t)(ms*1e6));
225 return HumanizeMs(ms);
226 }
227 #define HUMANIZE(ms) humanize(ms).c_str()
228
init(SkImageInfo info,Benchmark * bench)229 bool Target::init(SkImageInfo info, Benchmark* bench) {
230 if (Benchmark::Backend::kRaster == config.backend) {
231 this->surface = SkSurfaces::Raster(info);
232 if (!this->surface) {
233 return false;
234 }
235 }
236 return true;
237 }
capturePixels(SkBitmap * bmp)238 bool Target::capturePixels(SkBitmap* bmp) {
239 SkCanvas* canvas = this->getCanvas();
240 if (!canvas) {
241 return false;
242 }
243 bmp->allocPixels(canvas->imageInfo());
244 if (!canvas->readPixels(*bmp, 0, 0)) {
245 SkDebugf("Can't read canvas pixels.\n");
246 return false;
247 }
248 return true;
249 }
250
// Benchmark target backed by a Ganesh GPU surface created through a
// GrContextFactory owned by this target.
struct GPUTarget : public Target {
    explicit GPUTarget(const Config& c) : Target(c) {}
    ContextInfo contextInfo;
    std::unique_ptr<GrContextFactory> factory;

    ~GPUTarget() override {
        // For Vulkan we need to release all our refs to the GrContext before destroying the
        // Vulkan context, which happens at the end of this destructor. Thus we need to release
        // the surface here, which holds a ref to the GrContext.
        surface.reset();
    }

    void onSetup() override {
        this->contextInfo.testContext()->makeCurrent();
    }
    // Flush the context and wait on a GPU sync object so the frame is fully done.
    void submitFrame() override {
        if (this->contextInfo.testContext()) {
            this->contextInfo.testContext()->flushAndWaitOnSync(contextInfo.directContext());
        }
    }
    // Flush and block the CPU until all submitted GPU work has completed.
    void submitWorkAndSyncCPU() override {
        if (this->contextInfo.testContext()) {
            this->contextInfo.testContext()->flushAndSyncCpu(contextInfo.directContext());
        }
    }

    // GPU targets always need frame timing; report the context's max frame lag,
    // falling back to --gpuFrameLag when the context can't say.
    bool needsFrameTiming(int* maxFrameLag) const override {
        if (!this->contextInfo.testContext()->getMaxGpuFrameLag(maxFrameLag)) {
            // Frame lag is unknown.
            *maxFrameLag = FLAGS_gpuFrameLag;
        }
        return true;
    }
    // Build a context factory (letting the bench tweak the options), then create
    // the render-target surface for this config.
    bool init(SkImageInfo info, Benchmark* bench) override {
        GrContextOptions options = grContextOpts;
        bench->modifyGrContextOptions(&options);
        this->factory = std::make_unique<GrContextFactory>(options);
        SkSurfaceProps props(this->config.surfaceFlags, kRGB_H_SkPixelGeometry);
        this->surface = SkSurfaces::RenderTarget(
                this->factory->get(this->config.ctxType, this->config.ctxOverrides),
                skgpu::Budgeted::kNo,
                info,
                this->config.samples,
                &props);
        this->contextInfo =
                this->factory->getContextInfo(this->config.ctxType, this->config.ctxOverrides);
        if (!this->surface) {
            return false;
        }
        // Without fence sync, submitFrame() can't reliably wait for the GPU, so
        // warn that timings may be off.
        if (!this->contextInfo.testContext()->fenceSyncSupport()) {
            SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
                     "Timings might not be accurate.\n", this->config.name.c_str());
        }
        return true;
    }

    void dumpStats() override {
        auto context = this->contextInfo.directContext();

        context->priv().printCacheStats();
        context->priv().printGpuStats();
        context->priv().printContextStats();
    }
};
315
316 #if defined(SK_GRAPHITE)
317 struct GraphiteTarget : public Target {
GraphiteTargetGraphiteTarget318 explicit GraphiteTarget(const Config& c) : Target(c) {}
319 using TestContext = skiatest::graphite::GraphiteTestContext;
320 using ContextFactory = skiatest::graphite::ContextFactory;
321
322 std::unique_ptr<ContextFactory> factory;
323
324 TestContext* testContext;
325 skgpu::graphite::Context* context;
326 std::unique_ptr<skgpu::graphite::Recorder> recorder;
327
~GraphiteTargetGraphiteTarget328 ~GraphiteTarget() override {
329 // For Vulkan we need to release all our refs before we destroy the vulkan context which
330 // happens at the end of this destructor. Thus we need to release the surface here which
331 // holds a ref to the Graphite device
332 surface.reset();
333 }
submitFrameGraphiteTarget334 void submitFrame() override {
335 if (context && recorder) {
336 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
337 if (recording) {
338 this->testContext->submitRecordingAndWaitOnSync(this->context, recording.get());
339 }
340 }
341 }
submitWorkAndSyncCPUGraphiteTarget342 void submitWorkAndSyncCPU() override {
343 if (context && recorder) {
344 // TODO: have a way to sync work with out submitting a Recording which is currently
345 // required. Probably need to get to the point where the backend command buffers are
346 // stored on the Context and not Recordings before this is feasible.
347 std::unique_ptr<skgpu::graphite::Recording> recording = this->recorder->snap();
348 if (recording) {
349 skgpu::graphite::InsertRecordingInfo info;
350 info.fRecording = recording.get();
351 this->context->insertRecording(info);
352 }
353 this->context->submit(skgpu::graphite::SyncToCpu::kYes);
354 }
355 }
356
needsFrameTimingGraphiteTarget357 bool needsFrameTiming(int* maxFrameLag) const override {
358 SkAssertResult(this->testContext->getMaxGpuFrameLag(maxFrameLag));
359 return true;
360 }
initGraphiteTarget361 bool init(SkImageInfo info, Benchmark* bench) override {
362 skiatest::graphite::TestOptions testOptions = gTestOptions;
363 testOptions.fContextOptions.fRequireOrderedRecordings = true;
364 bench->modifyGraphiteContextOptions(&testOptions.fContextOptions);
365
366 this->factory = std::make_unique<ContextFactory>(testOptions);
367
368 skiatest::graphite::ContextInfo ctxInfo =
369 this->factory->getContextInfo(this->config.ctxType);
370 if (!ctxInfo.fContext) {
371 return false;
372 }
373 this->testContext = ctxInfo.fTestContext;
374 this->context = ctxInfo.fContext;
375
376 this->recorder = this->context->makeRecorder(ToolUtils::CreateTestingRecorderOptions());
377 if (!this->recorder) {
378 return false;
379 }
380
381 this->surface = SkSurfaces::RenderTarget(this->recorder.get(), info);
382 if (!this->surface) {
383 return false;
384 }
385 // TODO: get fence stuff working
386 #if 0
387 if (!this->contextInfo.testContext()->fenceSyncSupport()) {
388 SkDebugf("WARNING: GL context for config \"%s\" does not support fence sync. "
389 "Timings might not be accurate.\n", this->config.name.c_str());
390 }
391 #endif
392 return true;
393 }
394
dumpStatsGraphiteTarget395 void dumpStats() override {
396 }
397 };
398 #endif // SK_GRAPHITE
399
time(int loops,Benchmark * bench,Target * target)400 static double time(int loops, Benchmark* bench, Target* target) {
401 SkCanvas* canvas = target->getCanvas();
402 if (canvas) {
403 canvas->clear(SK_ColorWHITE);
404 }
405 bench->preDraw(canvas);
406 double start = now_ms();
407 canvas = target->beginTiming(canvas);
408
409 auto submitFrame = [target]() { target->submitFrame(); };
410
411 bench->draw(loops, canvas, submitFrame);
412
413 double elapsed = now_ms() - start;
414 bench->postDraw(canvas);
415 return elapsed;
416 }
417
estimate_timer_overhead()418 static double estimate_timer_overhead() {
419 double overhead = 0;
420 for (int i = 0; i < FLAGS_overheadLoops; i++) {
421 double start = now_ms();
422 overhead += now_ms() - start;
423 }
424 return overhead / FLAGS_overheadLoops;
425 }
426
detect_forever_loops(int loops)427 static int detect_forever_loops(int loops) {
428 // look for a magic run-forever value
429 if (loops < 0) {
430 loops = SK_MaxS32;
431 }
432 return loops;
433 }
434
clamp_loops(int loops)435 static int clamp_loops(int loops) {
436 if (loops < 1) {
437 SkDebugf("ERROR: clamping loops from %d to 1. "
438 "There's probably something wrong with the bench.\n", loops);
439 return 1;
440 }
441 if (loops > FLAGS_maxLoops) {
442 SkDebugf("WARNING: clamping loops from %d to FLAGS_maxLoops, %d.\n", loops, FLAGS_maxLoops);
443 return FLAGS_maxLoops;
444 }
445 return loops;
446 }
447
write_canvas_png(Target * target,const SkString & filename)448 static bool write_canvas_png(Target* target, const SkString& filename) {
449
450 if (filename.isEmpty()) {
451 return false;
452 }
453 if (target->getCanvas() &&
454 kUnknown_SkColorType == target->getCanvas()->imageInfo().colorType()) {
455 return false;
456 }
457
458 SkBitmap bmp;
459
460 if (!target->capturePixels(&bmp)) {
461 return false;
462 }
463
464 SkString dir = SkOSPath::Dirname(filename.c_str());
465 if (!sk_mkdir(dir.c_str())) {
466 SkDebugf("Can't make dir %s.\n", dir.c_str());
467 return false;
468 }
469 SkFILEWStream stream(filename.c_str());
470 if (!stream.isValid()) {
471 SkDebugf("Can't write %s.\n", filename.c_str());
472 return false;
473 }
474 if (!SkPngEncoder::Encode(&stream, bmp.pixmap(), {})) {
475 SkDebugf("Can't encode a PNG.\n");
476 return false;
477 }
478 return true;
479 }
480
481 static int kFailedLoops = -2;
setup_cpu_bench(const double overhead,Target * target,Benchmark * bench)482 static int setup_cpu_bench(const double overhead, Target* target, Benchmark* bench) {
483 // First figure out approximately how many loops of bench it takes to make overhead negligible.
484 double bench_plus_overhead = 0.0;
485 int round = 0;
486 int loops = bench->shouldLoop() ? FLAGS_loops : 1;
487 if (kAutoTuneLoops == loops) {
488 while (bench_plus_overhead < overhead) {
489 if (round++ == FLAGS_maxCalibrationAttempts) {
490 SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
491 bench->getUniqueName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
492 return kFailedLoops;
493 }
494 bench_plus_overhead = time(1, bench, target);
495 }
496 }
497
498 // Later we'll just start and stop the timer once but loop N times.
499 // We'll pick N to make timer overhead negligible:
500 //
501 // overhead
502 // ------------------------- < FLAGS_overheadGoal
503 // overhead + N * Bench Time
504 //
505 // where bench_plus_overhead ~=~ overhead + Bench Time.
506 //
507 // Doing some math, we get:
508 //
509 // (overhead / FLAGS_overheadGoal) - overhead
510 // ------------------------------------------ < N
511 // bench_plus_overhead - overhead)
512 //
513 // Luckily, this also works well in practice. :)
514 if (kAutoTuneLoops == loops) {
515 const double numer = overhead / FLAGS_overheadGoal - overhead;
516 const double denom = bench_plus_overhead - overhead;
517 loops = (int)ceil(numer / denom);
518 loops = clamp_loops(loops);
519 } else {
520 loops = detect_forever_loops(loops);
521 }
522
523 return loops;
524 }
525
setup_gpu_bench(Target * target,Benchmark * bench,int maxGpuFrameLag)526 static int setup_gpu_bench(Target* target, Benchmark* bench, int maxGpuFrameLag) {
527 // First, figure out how many loops it'll take to get a frame up to FLAGS_gpuMs.
528 int loops = bench->shouldLoop() ? FLAGS_loops : 1;
529 if (kAutoTuneLoops == loops) {
530 loops = 1;
531 double elapsed = 0;
532 do {
533 if (1<<30 == loops) {
534 // We're about to wrap. Something's wrong with the bench.
535 loops = 0;
536 break;
537 }
538 loops *= 2;
539 // If the GPU lets frames lag at all, we need to make sure we're timing
540 // _this_ round, not still timing last round.
541 for (int i = 0; i < maxGpuFrameLag; i++) {
542 elapsed = time(loops, bench, target);
543 }
544 } while (elapsed < FLAGS_gpuMs);
545
546 // We've overshot at least a little. Scale back linearly.
547 loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);
548 loops = clamp_loops(loops);
549
550 // Make sure we're not still timing our calibration.
551 target->submitWorkAndSyncCPU();
552 } else {
553 loops = detect_forever_loops(loops);
554 }
555 // Pretty much the same deal as the calibration: do some warmup to make
556 // sure we're timing steady-state pipelined frames.
557 for (int i = 0; i < maxGpuFrameLag; i++) {
558 time(loops, bench, target);
559 }
560
561 return loops;
562 }
563
// Placeholder Ganesh context type/overrides for Config entries whose backend
// doesn't use a Ganesh context (CPU and Graphite configs below).
#define kBogusContextType skgpu::ContextType::kGL
#define kBogusContextOverrides GrContextFactory::ContextOverrides::kNone
566
create_config(const SkCommandLineConfig * config)567 static std::optional<Config> create_config(const SkCommandLineConfig* config) {
568 if (const auto* gpuConfig = config->asConfigGpu()) {
569 if (!FLAGS_gpu) {
570 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
571 return std::nullopt;
572 }
573
574 const auto ctxType = gpuConfig->getContextType();
575 const auto ctxOverrides = gpuConfig->getContextOverrides();
576 const auto sampleCount = gpuConfig->getSamples();
577 const auto colorType = gpuConfig->getColorType();
578 if (gpuConfig->getSurfType() != SkCommandLineConfigGpu::SurfType::kDefault) {
579 SkDebugf("This tool only supports the default surface type.");
580 return std::nullopt;
581 }
582
583 GrContextFactory factory(grContextOpts);
584 if (const auto ctx = factory.get(ctxType, ctxOverrides)) {
585 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
586 int supportedSampleCount =
587 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
588 if (sampleCount != supportedSampleCount) {
589 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
590 config->getTag().c_str(),
591 sampleCount);
592 return std::nullopt;
593 }
594 } else {
595 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
596 return std::nullopt;
597 }
598
599 return Config{gpuConfig->getTag(),
600 Benchmark::Backend::kGanesh,
601 colorType,
602 kPremul_SkAlphaType,
603 config->refColorSpace(),
604 sampleCount,
605 ctxType,
606 ctxOverrides,
607 gpuConfig->getSurfaceFlags()};
608 }
609 #if defined(SK_GRAPHITE)
610 if (const auto* gpuConfig = config->asConfigGraphite()) {
611 if (!FLAGS_gpu) {
612 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str());
613 return std::nullopt;
614 }
615
616 const auto graphiteCtxType = gpuConfig->getContextType();
617 const auto sampleCount = 1; // TODO: gpuConfig->getSamples();
618 const auto colorType = gpuConfig->getColorType();
619
620 using ContextFactory = skiatest::graphite::ContextFactory;
621
622 ContextFactory factory(gTestOptions);
623 skiatest::graphite::ContextInfo ctxInfo = factory.getContextInfo(graphiteCtxType);
624 skgpu::graphite::Context* ctx = ctxInfo.fContext;
625 if (ctx) {
626 // TODO: Add graphite ctx queries for supported sample count by color type.
627 #if 0
628 GrBackendFormat format = ctx->defaultBackendFormat(colorType, GrRenderable::kYes);
629 int supportedSampleCount =
630 ctx->priv().caps()->getRenderTargetSampleCount(sampleCount, format);
631 if (sampleCount != supportedSampleCount) {
632 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
633 config->getTag().c_str(),
634 sampleCount);
635 return std::nullopt;
636 }
637 #else
638 if (sampleCount > 1) {
639 SkDebugf("Configuration '%s' sample count %d is not a supported sample count.\n",
640 config->getTag().c_str(),
641 sampleCount);
642 return std::nullopt;
643 }
644 #endif
645 } else {
646 SkDebugf("No context was available matching config '%s'.\n", config->getTag().c_str());
647 return std::nullopt;
648 }
649
650 return Config{gpuConfig->getTag(),
651 Benchmark::Backend::kGraphite,
652 colorType,
653 kPremul_SkAlphaType,
654 config->refColorSpace(),
655 sampleCount,
656 graphiteCtxType,
657 kBogusContextOverrides,
658 0};
659 }
660 #endif
661
662 #define CPU_CONFIG(name, backend, color, alpha) \
663 if (config->getBackend().equals(name)) { \
664 if (!FLAGS_cpu) { \
665 SkDebugf("Skipping config '%s' as requested.\n", config->getTag().c_str()); \
666 return std::nullopt; \
667 } \
668 return Config{SkString(name), \
669 Benchmark::backend, \
670 color, \
671 alpha, \
672 config->refColorSpace(), \
673 0, \
674 kBogusContextType, \
675 kBogusContextOverrides, \
676 0}; \
677 }
678
679 CPU_CONFIG("nonrendering", Backend::kNonRendering, kUnknown_SkColorType, kUnpremul_SkAlphaType)
680
681 CPU_CONFIG("a8", Backend::kRaster, kAlpha_8_SkColorType, kPremul_SkAlphaType)
682 CPU_CONFIG("565", Backend::kRaster, kRGB_565_SkColorType, kOpaque_SkAlphaType)
683 CPU_CONFIG("8888", Backend::kRaster, kN32_SkColorType, kPremul_SkAlphaType)
684 CPU_CONFIG("rgba", Backend::kRaster, kRGBA_8888_SkColorType, kPremul_SkAlphaType)
685 CPU_CONFIG("bgra", Backend::kRaster, kBGRA_8888_SkColorType, kPremul_SkAlphaType)
686 CPU_CONFIG("f16", Backend::kRaster, kRGBA_F16_SkColorType, kPremul_SkAlphaType)
687 CPU_CONFIG("srgba", Backend::kRaster, kSRGBA_8888_SkColorType, kPremul_SkAlphaType)
688
689 #undef CPU_CONFIG
690
691 SkDebugf("Unknown config '%s'.\n", config->getTag().c_str());
692 return std::nullopt;
693 }
694
// Append all configs that are enabled and supported.
void create_configs(TArray<Config>* configs) {
    SkCommandLineConfigArray array;
    ParseConfigs(FLAGS_config, &array);
    for (int i = 0; i < array.size(); ++i) {
        if (std::optional<Config> config = create_config(array[i].get())) {
            configs->push_back(*config);
        }
    }

    // We're okay if nothing was requested (we ran with defaults), or if every
    // explicitly requested config was successfully created.
    if (array.size() == 0 || FLAGS_config.size() == 0 ||
        array.size() == configs->size()) {
        return;
    }
    // Otherwise at least one requested config could not be created; bail out.
    exit(1);
}
713
714 // disable warning : switch statement contains default but no 'case' labels
715 #if defined _WIN32
716 #pragma warning ( push )
717 #pragma warning ( disable : 4065 )
718 #endif
719
// If bench is enabled for config, returns a Target* for it, otherwise nullptr.
// The caller owns the returned target (it is freed via cleanup_run).
static Target* is_enabled(Benchmark* bench, const Config& config) {
    if (!bench->isSuitableFor(config.backend)) {
        return nullptr;
    }

    SkImageInfo info =
            SkImageInfo::Make(bench->getSize(), config.color, config.alpha, config.colorSpace);

    Target* target = nullptr;

    // Pick the target subclass for the backend; plain Target covers raster
    // and nonrendering configs.
    switch (config.backend) {
        case Benchmark::Backend::kGanesh:
            target = new GPUTarget(config);
            break;
#if defined(SK_GRAPHITE)
        case Benchmark::Backend::kGraphite:
            target = new GraphiteTarget(config);
            break;
#endif
        default:
            target = new Target(config);
            break;
    }

    if (!target->init(info, bench)) {
        delete target;
        return nullptr;
    }
    return target;
}
751
752 #if defined _WIN32
753 #pragma warning ( pop )
754 #endif
755
756 #ifdef SK_ENABLE_ANDROID_UTILS
valid_brd_bench(sk_sp<SkData> encoded,SkColorType colorType,uint32_t sampleSize,uint32_t minOutputSize,int * width,int * height)757 static bool valid_brd_bench(sk_sp<SkData> encoded, SkColorType colorType, uint32_t sampleSize,
758 uint32_t minOutputSize, int* width, int* height) {
759 auto brd = android::skia::BitmapRegionDecoder::Make(encoded);
760 if (nullptr == brd) {
761 // This is indicates that subset decoding is not supported for a particular image format.
762 return false;
763 }
764
765 if (sampleSize * minOutputSize > (uint32_t) brd->width() || sampleSize * minOutputSize >
766 (uint32_t) brd->height()) {
767 // This indicates that the image is not large enough to decode a
768 // minOutputSize x minOutputSize subset at the given sampleSize.
769 return false;
770 }
771
772 // Set the image width and height. The calling code will use this to choose subsets to decode.
773 *width = brd->width();
774 *height = brd->height();
775 return true;
776 }
777 #endif
778
cleanup_run(Target * target)779 static void cleanup_run(Target* target) {
780 delete target;
781 }
782
collect_files(const CommandLineFlags::StringArray & paths,const char * ext,TArray<SkString> * list)783 static void collect_files(const CommandLineFlags::StringArray& paths,
784 const char* ext,
785 TArray<SkString>* list) {
786 for (int i = 0; i < paths.size(); ++i) {
787 if (SkStrEndsWith(paths[i], ext)) {
788 list->push_back(SkString(paths[i]));
789 } else {
790 SkOSFile::Iter it(paths[i], ext);
791 SkString path;
792 while (it.next(&path)) {
793 list->push_back(SkOSPath::Join(paths[i], path.c_str()));
794 }
795 }
796 }
797 }
798
799 class BenchmarkStream {
800 public:
BenchmarkStream()801 BenchmarkStream() : fBenches(BenchRegistry::Head())
802 , fGMs(skiagm::GMRegistry::Head()) {
803 collect_files(FLAGS_skps, ".skp", &fSKPs);
804 collect_files(FLAGS_mskps, ".mskp", &fMSKPs);
805 collect_files(FLAGS_svgs, ".svg", &fSVGs);
806 collect_files(FLAGS_texttraces, ".trace", &fTextBlobTraces);
807
808 if (4 != sscanf(FLAGS_clip[0], "%d,%d,%d,%d",
809 &fClip.fLeft, &fClip.fTop, &fClip.fRight, &fClip.fBottom)) {
810 SkDebugf("Can't parse %s from --clip as an SkIRect.\n", FLAGS_clip[0]);
811 exit(1);
812 }
813
814 for (int i = 0; i < FLAGS_scales.size(); i++) {
815 if (1 != sscanf(FLAGS_scales[i], "%f", &fScales.push_back())) {
816 SkDebugf("Can't parse %s from --scales as an SkScalar.\n", FLAGS_scales[i]);
817 exit(1);
818 }
819 }
820
821 if (2 != sscanf(FLAGS_zoom[0], "%f,%lf", &fZoomMax, &fZoomPeriodMs)) {
822 SkDebugf("Can't parse %s from --zoom as a zoomMax,zoomPeriodMs.\n", FLAGS_zoom[0]);
823 exit(1);
824 }
825
826 // Prepare the images for decoding
827 if (!CommonFlags::CollectImages(FLAGS_images, &fImages)) {
828 exit(1);
829 }
830
831 // Choose the candidate color types for image decoding
832 fColorTypes.push_back(kN32_SkColorType);
833 if (!FLAGS_simpleCodec) {
834 fColorTypes.push_back(kRGB_565_SkColorType);
835 fColorTypes.push_back(kAlpha_8_SkColorType);
836 fColorTypes.push_back(kGray_8_SkColorType);
837 }
838 }
839
ReadPicture(const char * path)840 static sk_sp<SkPicture> ReadPicture(const char* path) {
841 // Not strictly necessary, as it will be checked again later,
842 // but helps to avoid a lot of pointless work if we're going to skip it.
843 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
844 return nullptr;
845 }
846
847 std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(path);
848 if (!stream) {
849 SkDebugf("Could not read %s.\n", path);
850 return nullptr;
851 }
852
853 return SkPicture::MakeFromStream(stream.get());
854 }
855
ReadMSKP(const char * path)856 static std::unique_ptr<MSKPPlayer> ReadMSKP(const char* path) {
857 // Not strictly necessary, as it will be checked again later,
858 // but helps to avoid a lot of pointless work if we're going to skip it.
859 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
860 return nullptr;
861 }
862
863 std::unique_ptr<SkStreamSeekable> stream = SkStream::MakeFromFile(path);
864 if (!stream) {
865 SkDebugf("Could not read %s.\n", path);
866 return nullptr;
867 }
868
869 return MSKPPlayer::Make(stream.get());
870 }
871
ReadSVGPicture(const char * path)872 static sk_sp<SkPicture> ReadSVGPicture(const char* path) {
873 if (CommandLineFlags::ShouldSkip(FLAGS_match, SkOSPath::Basename(path).c_str())) {
874 return nullptr;
875 }
876 sk_sp<SkData> data(SkData::MakeFromFileName(path));
877 if (!data) {
878 SkDebugf("Could not read %s.\n", path);
879 return nullptr;
880 }
881
882 #if defined(SK_ENABLE_SVG)
883 SkMemoryStream stream(std::move(data));
884 sk_sp<SkSVGDOM> svgDom = SkSVGDOM::Builder()
885 .setFontManager(ToolUtils::TestFontMgr())
886 .setTextShapingFactory(SkShapers::BestAvailable())
887 .make(stream);
888 if (!svgDom) {
889 SkDebugf("Could not parse %s.\n", path);
890 return nullptr;
891 }
892
893 // Use the intrinsic SVG size if available, otherwise fall back to a default value.
894 static const SkSize kDefaultContainerSize = SkSize::Make(128, 128);
895 if (svgDom->containerSize().isEmpty()) {
896 svgDom->setContainerSize(kDefaultContainerSize);
897 }
898
899 SkPictureRecorder recorder;
900 svgDom->render(recorder.beginRecording(svgDom->containerSize().width(),
901 svgDom->containerSize().height()));
902 return recorder.finishRecordingAsPicture();
903 #else
904 return nullptr;
905 #endif // defined(SK_ENABLE_SVG)
906 }
907
next()908 Benchmark* next() {
909 std::unique_ptr<Benchmark> bench;
910 do {
911 bench.reset(this->rawNext());
912 if (!bench) {
913 return nullptr;
914 }
915 } while (CommandLineFlags::ShouldSkip(FLAGS_sourceType, fSourceType) ||
916 CommandLineFlags::ShouldSkip(FLAGS_benchType, fBenchType));
917 return bench.release();
918 }
919
rawNext()920 Benchmark* rawNext() {
921 if (fBenches) {
922 Benchmark* bench = fBenches->get()(nullptr);
923 fBenches = fBenches->next();
924 fSourceType = "bench";
925 fBenchType = "micro";
926 return bench;
927 }
928
929 while (fGMs) {
930 std::unique_ptr<skiagm::GM> gm = fGMs->get()();
931 if (gm->isBazelOnly()) {
932 // We skip Bazel-only GMs because they might not be regular GMs. The Bazel build
933 // reuses the notion of GMs to replace the notion of DM sources of various kinds,
934 // such as codec sources and image generation sources. See comments in the
935 // skiagm::GM::isBazelOnly function declaration for context.
936 continue;
937 }
938 fGMs = fGMs->next();
939 if (gm->runAsBench()) {
940 fSourceType = "gm";
941 fBenchType = "micro";
942 return new GMBench(std::move(gm));
943 }
944 }
945
946 while (fCurrentTextBlobTrace < fTextBlobTraces.size()) {
947 SkString path = fTextBlobTraces[fCurrentTextBlobTrace++];
948 SkString basename = SkOSPath::Basename(path.c_str());
949 static constexpr char kEnding[] = ".trace";
950 if (basename.endsWith(kEnding)) {
951 basename.remove(basename.size() - strlen(kEnding), strlen(kEnding));
952 }
953 fSourceType = "texttrace";
954 fBenchType = "micro";
955 return CreateDiffCanvasBench(
956 SkStringPrintf("SkDiffBench-%s", basename.c_str()),
957 [path](){ return SkStream::MakeFromFile(path.c_str()); });
958 }
959
960 // First add all .skps as RecordingBenches.
961 while (fCurrentRecording < fSKPs.size()) {
962 const SkString& path = fSKPs[fCurrentRecording++];
963 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
964 if (!pic) {
965 continue;
966 }
967 SkString name = SkOSPath::Basename(path.c_str());
968 fSourceType = "skp";
969 fBenchType = "recording";
970 fSKPBytes = static_cast<double>(pic->approximateBytesUsed());
971 fSKPOps = pic->approximateOpCount();
972 return new RecordingBench(name.c_str(), pic.get(), FLAGS_bbh);
973 }
974
975 // Add all .skps as DeserializePictureBenchs.
976 while (fCurrentDeserialPicture < fSKPs.size()) {
977 const SkString& path = fSKPs[fCurrentDeserialPicture++];
978 sk_sp<SkData> data = SkData::MakeFromFileName(path.c_str());
979 if (!data) {
980 continue;
981 }
982 SkString name = SkOSPath::Basename(path.c_str());
983 fSourceType = "skp";
984 fBenchType = "deserial";
985 fSKPBytes = static_cast<double>(data->size());
986 fSKPOps = 0;
987 return new DeserializePictureBench(name.c_str(), std::move(data));
988 }
989
990 // Then once each for each scale as SKPBenches (playback).
991 while (fCurrentScale < fScales.size()) {
992 while (fCurrentSKP < fSKPs.size()) {
993 const SkString& path = fSKPs[fCurrentSKP++];
994 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
995 if (!pic) {
996 continue;
997 }
998
999 if (FLAGS_bbh) {
1000 // The SKP we read off disk doesn't have a BBH. Re-record so it grows one.
1001 SkRTreeFactory factory;
1002 SkPictureRecorder recorder;
1003 pic->playback(recorder.beginRecording(pic->cullRect().width(),
1004 pic->cullRect().height(),
1005 &factory));
1006 pic = recorder.finishRecordingAsPicture();
1007 }
1008 SkString name = SkOSPath::Basename(path.c_str());
1009 fSourceType = "skp";
1010 fBenchType = "playback";
1011 return new SKPBench(name.c_str(), pic.get(), fClip, fScales[fCurrentScale],
1012 FLAGS_loopSKP);
1013 }
1014
1015 while (fCurrentSVG < fSVGs.size()) {
1016 const char* path = fSVGs[fCurrentSVG++].c_str();
1017 if (sk_sp<SkPicture> pic = ReadSVGPicture(path)) {
1018 fSourceType = "svg";
1019 fBenchType = "playback";
1020 return new SKPBench(SkOSPath::Basename(path).c_str(), pic.get(), fClip,
1021 fScales[fCurrentScale], FLAGS_loopSKP);
1022 }
1023 }
1024
1025 fCurrentSKP = 0;
1026 fCurrentSVG = 0;
1027 fCurrentScale++;
1028 }
1029
1030 // Now loop over each skp again if we have an animation
1031 if (fZoomMax != 1.0f && fZoomPeriodMs > 0) {
1032 while (fCurrentAnimSKP < fSKPs.size()) {
1033 const SkString& path = fSKPs[fCurrentAnimSKP];
1034 sk_sp<SkPicture> pic = ReadPicture(path.c_str());
1035 if (!pic) {
1036 fCurrentAnimSKP++;
1037 continue;
1038 }
1039
1040 fCurrentAnimSKP++;
1041 SkString name = SkOSPath::Basename(path.c_str());
1042 sk_sp<SKPAnimationBench::Animation> animation =
1043 SKPAnimationBench::MakeZoomAnimation(fZoomMax, fZoomPeriodMs);
1044 return new SKPAnimationBench(name.c_str(), pic.get(), fClip, std::move(animation),
1045 FLAGS_loopSKP);
1046 }
1047 }
1048
1049 // Read all MSKPs as benches
1050 while (fCurrentMSKP < fMSKPs.size()) {
1051 const SkString& path = fMSKPs[fCurrentMSKP++];
1052 std::unique_ptr<MSKPPlayer> player = ReadMSKP(path.c_str());
1053 if (!player) {
1054 continue;
1055 }
1056 SkString name = SkOSPath::Basename(path.c_str());
1057 fSourceType = "mskp";
1058 fBenchType = "mskp";
1059 return new MSKPBench(std::move(name), std::move(player));
1060 }
1061
1062 for (; fCurrentCodec < fImages.size(); fCurrentCodec++) {
1063 fSourceType = "image";
1064 fBenchType = "skcodec";
1065 const SkString& path = fImages[fCurrentCodec];
1066 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1067 continue;
1068 }
1069 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1070 std::unique_ptr<SkCodec> codec(SkCodec::MakeFromData(encoded));
1071 if (!codec) {
1072 // Nothing to time.
1073 SkDebugf("Cannot find codec for %s\n", path.c_str());
1074 continue;
1075 }
1076
1077 while (fCurrentColorType < fColorTypes.size()) {
1078 const SkColorType colorType = fColorTypes[fCurrentColorType];
1079
1080 SkAlphaType alphaType = codec->getInfo().alphaType();
1081 if (FLAGS_simpleCodec) {
1082 if (kUnpremul_SkAlphaType == alphaType) {
1083 alphaType = kPremul_SkAlphaType;
1084 }
1085
1086 fCurrentColorType++;
1087 } else {
1088 switch (alphaType) {
1089 case kOpaque_SkAlphaType:
1090 // We only need to test one alpha type (opaque).
1091 fCurrentColorType++;
1092 break;
1093 case kUnpremul_SkAlphaType:
1094 case kPremul_SkAlphaType:
1095 if (0 == fCurrentAlphaType) {
1096 // Test unpremul first.
1097 alphaType = kUnpremul_SkAlphaType;
1098 fCurrentAlphaType++;
1099 } else {
1100 // Test premul.
1101 alphaType = kPremul_SkAlphaType;
1102 fCurrentAlphaType = 0;
1103 fCurrentColorType++;
1104 }
1105 break;
1106 default:
1107 SkASSERT(false);
1108 fCurrentColorType++;
1109 break;
1110 }
1111 }
1112
1113 // Make sure we can decode to this color type and alpha type.
1114 SkImageInfo info =
1115 codec->getInfo().makeColorType(colorType).makeAlphaType(alphaType);
1116 const size_t rowBytes = info.minRowBytes();
1117 SkAutoMalloc storage(info.computeByteSize(rowBytes));
1118
1119 const SkCodec::Result result = codec->getPixels(
1120 info, storage.get(), rowBytes);
1121 switch (result) {
1122 case SkCodec::kSuccess:
1123 case SkCodec::kIncompleteInput:
1124 return new CodecBench(SkOSPath::Basename(path.c_str()),
1125 encoded.get(), colorType, alphaType);
1126 case SkCodec::kInvalidConversion:
1127 // This is okay. Not all conversions are valid.
1128 break;
1129 default:
1130 // This represents some sort of failure.
1131 SkASSERT(false);
1132 break;
1133 }
1134 }
1135 fCurrentColorType = 0;
1136 }
1137
1138 // Run AndroidCodecBenches
1139 const int sampleSizes[] = { 2, 4, 8 };
1140 for (; fCurrentAndroidCodec < fImages.size(); fCurrentAndroidCodec++) {
1141 fSourceType = "image";
1142 fBenchType = "skandroidcodec";
1143
1144 const SkString& path = fImages[fCurrentAndroidCodec];
1145 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1146 continue;
1147 }
1148 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1149 std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::MakeFromData(encoded));
1150 if (!codec) {
1151 // Nothing to time.
1152 SkDebugf("Cannot find codec for %s\n", path.c_str());
1153 continue;
1154 }
1155
1156 while (fCurrentSampleSize < (int) std::size(sampleSizes)) {
1157 int sampleSize = sampleSizes[fCurrentSampleSize];
1158 fCurrentSampleSize++;
1159 if (10 * sampleSize > std::min(codec->getInfo().width(), codec->getInfo().height())) {
1160 // Avoid benchmarking scaled decodes of already small images.
1161 break;
1162 }
1163
1164 return new AndroidCodecBench(SkOSPath::Basename(path.c_str()),
1165 encoded.get(), sampleSize);
1166 }
1167 fCurrentSampleSize = 0;
1168 }
1169
1170 #ifdef SK_ENABLE_ANDROID_UTILS
1171 // Run the BRDBenches
1172 // We intend to create benchmarks that model the use cases in
1173 // android/libraries/social/tiledimage. In this library, an image is decoded in 512x512
1174 // tiles. The image can be translated freely, so the location of a tile may be anywhere in
1175 // the image. For that reason, we will benchmark decodes in five representative locations
1176 // in the image. Additionally, this use case utilizes power of two scaling, so we will
1177 // test on power of two sample sizes. The output tile is always 512x512, so, when a
1178 // sampleSize is used, the size of the subset that is decoded is always
1179 // (sampleSize*512)x(sampleSize*512).
1180 // There are a few good reasons to only test on power of two sample sizes at this time:
1181 // All use cases we are aware of only scale by powers of two.
1182 // PNG decodes use the indicated sampling strategy regardless of the sample size, so
1183 // these tests are sufficient to provide good coverage of our scaling options.
1184 const uint32_t brdSampleSizes[] = { 1, 2, 4, 8, 16 };
1185 const uint32_t minOutputSize = 512;
1186 for (; fCurrentBRDImage < fImages.size(); fCurrentBRDImage++) {
1187 fSourceType = "image";
1188 fBenchType = "BRD";
1189
1190 const SkString& path = fImages[fCurrentBRDImage];
1191 if (CommandLineFlags::ShouldSkip(FLAGS_match, path.c_str())) {
1192 continue;
1193 }
1194
1195 while (fCurrentColorType < fColorTypes.size()) {
1196 while (fCurrentSampleSize < (int) std::size(brdSampleSizes)) {
1197 while (fCurrentSubsetType <= kLastSingle_SubsetType) {
1198
1199 sk_sp<SkData> encoded(SkData::MakeFromFileName(path.c_str()));
1200 const SkColorType colorType = fColorTypes[fCurrentColorType];
1201 uint32_t sampleSize = brdSampleSizes[fCurrentSampleSize];
1202 int currentSubsetType = fCurrentSubsetType++;
1203
1204 int width = 0;
1205 int height = 0;
1206 if (!valid_brd_bench(encoded, colorType, sampleSize, minOutputSize,
1207 &width, &height)) {
1208 break;
1209 }
1210
1211 SkString basename = SkOSPath::Basename(path.c_str());
1212 SkIRect subset;
1213 const uint32_t subsetSize = sampleSize * minOutputSize;
1214 switch (currentSubsetType) {
1215 case kTopLeft_SubsetType:
1216 basename.append("_TopLeft");
1217 subset = SkIRect::MakeXYWH(0, 0, subsetSize, subsetSize);
1218 break;
1219 case kTopRight_SubsetType:
1220 basename.append("_TopRight");
1221 subset = SkIRect::MakeXYWH(width - subsetSize, 0, subsetSize,
1222 subsetSize);
1223 break;
1224 case kMiddle_SubsetType:
1225 basename.append("_Middle");
1226 subset = SkIRect::MakeXYWH((width - subsetSize) / 2,
1227 (height - subsetSize) / 2, subsetSize, subsetSize);
1228 break;
1229 case kBottomLeft_SubsetType:
1230 basename.append("_BottomLeft");
1231 subset = SkIRect::MakeXYWH(0, height - subsetSize, subsetSize,
1232 subsetSize);
1233 break;
1234 case kBottomRight_SubsetType:
1235 basename.append("_BottomRight");
1236 subset = SkIRect::MakeXYWH(width - subsetSize,
1237 height - subsetSize, subsetSize, subsetSize);
1238 break;
1239 default:
1240 SkASSERT(false);
1241 }
1242
1243 return new BitmapRegionDecoderBench(basename.c_str(), encoded.get(),
1244 colorType, sampleSize, subset);
1245 }
1246 fCurrentSubsetType = 0;
1247 fCurrentSampleSize++;
1248 }
1249 fCurrentSampleSize = 0;
1250 fCurrentColorType++;
1251 }
1252 fCurrentColorType = 0;
1253 }
1254 #endif // SK_ENABLE_ANDROID_UTILS
1255
1256 return nullptr;
1257 }
1258
fillCurrentOptions(NanoJSONResultsWriter & log) const1259 void fillCurrentOptions(NanoJSONResultsWriter& log) const {
1260 log.appendCString("source_type", fSourceType);
1261 log.appendCString("bench_type", fBenchType);
1262 if (0 == strcmp(fSourceType, "skp")) {
1263 log.appendString("clip",
1264 SkStringPrintf("%d %d %d %d", fClip.fLeft, fClip.fTop,
1265 fClip.fRight, fClip.fBottom));
1266 SkASSERT_RELEASE(fCurrentScale < fScales.size()); // debugging paranoia
1267 log.appendString("scale", SkStringPrintf("%.2g", fScales[fCurrentScale]));
1268 }
1269 }
1270
fillCurrentMetrics(NanoJSONResultsWriter & log) const1271 void fillCurrentMetrics(NanoJSONResultsWriter& log) const {
1272 if (0 == strcmp(fBenchType, "recording")) {
1273 log.appendMetric("bytes", fSKPBytes);
1274 log.appendMetric("ops", fSKPOps);
1275 }
1276 }
1277
private:
#ifdef SK_ENABLE_ANDROID_UTILS
    // Which region of the image a BRD bench decodes. rawNext() iterates only
    // the single-subset cases up to kLastSingle_SubsetType; the translate/zoom
    // variants exist in the enum but are not produced there.
    enum SubsetType {
        kTopLeft_SubsetType = 0,
        kTopRight_SubsetType = 1,
        kMiddle_SubsetType = 2,
        kBottomLeft_SubsetType = 3,
        kBottomRight_SubsetType = 4,
        kTranslate_SubsetType = 5,
        kZoom_SubsetType = 6,
        kLast_SubsetType = kZoom_SubsetType,
        kLastSingle_SubsetType = kBottomRight_SubsetType,
    };
#endif

    // Compiled-in registries; rawNext() consumes (advances) these lists.
    const BenchRegistry* fBenches;
    const skiagm::GMRegistry* fGMs;
    SkIRect fClip;                      // clip rect handed to SKP/animation benches
    TArray<SkScalar> fScales;           // each SKP/SVG is played back once per scale
    TArray<SkString> fSKPs;             // input file lists gathered from flags
    TArray<SkString> fMSKPs;
    TArray<SkString> fSVGs;
    TArray<SkString> fTextBlobTraces;
    TArray<SkString> fImages;
    TArray<SkColorType, true> fColorTypes;  // candidate decode color types
    SkScalar fZoomMax;                  // zoom animation params parsed from --zoom
    double fZoomPeriodMs;

    // Stats of the most recently returned SKP, logged by fillCurrentMetrics().
    double fSKPBytes, fSKPOps;

    const char* fSourceType; // What we're benching: bench, GM, SKP, ...
    const char* fBenchType; // How we bench it: micro, recording, playback, ...
    // Persistent cursors: rawNext() is a resumable iterator, and each counter
    // below records how far a given source list has been consumed.
    int fCurrentRecording = 0;
    int fCurrentDeserialPicture = 0;
    int fCurrentMSKP = 0;
    int fCurrentScale = 0;
    int fCurrentSKP = 0;
    int fCurrentSVG = 0;
    int fCurrentTextBlobTrace = 0;
    int fCurrentCodec = 0;
    int fCurrentAndroidCodec = 0;
#ifdef SK_ENABLE_ANDROID_UTILS
    int fCurrentBRDImage = 0;
    int fCurrentSubsetType = 0;
#endif
    int fCurrentColorType = 0;
    int fCurrentAlphaType = 0;
    int fCurrentSampleSize = 0;
    int fCurrentAnimSKP = 0;
};
1328
1329 // Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
1330 // This prints something every once in a while so that it knows we're still working.
start_keepalive()1331 static void start_keepalive() {
1332 static std::thread* intentionallyLeaked = new std::thread([]{
1333 for (;;) {
1334 static const int kSec = 1200;
1335 #if defined(SK_BUILD_FOR_WIN)
1336 Sleep(kSec * 1000);
1337 #else
1338 sleep(kSec);
1339 #endif
1340 SkDebugf("\nBenchmarks still running...\n");
1341 }
1342 });
1343 (void)intentionallyLeaked;
1344 SK_INTENTIONALLY_LEAKED(intentionallyLeaked);
1345 }
1346
1347 class NanobenchShaderErrorHandler : public GrContextOptions::ShaderErrorHandler {
compileError(const char * shader,const char * errors)1348 void compileError(const char* shader, const char* errors) override {
1349 // Nanobench should abort if any shader can't compile. Failure is much better than
1350 // reporting meaningless performance metrics.
1351 std::string message = SkShaderUtils::BuildShaderErrorMessage(shader, errors);
1352 SK_ABORT("\n%s", message.c_str());
1353 }
1354 };
1355
// Nanobench entry point: parse flags, set up logging/tracing/codecs, run every
// benchmark from BenchmarkStream against every enabled config, and emit
// timing statistics to stdout and (optionally) a JSON results file.
int main(int argc, char** argv) {
    CommandLineFlags::Parse(argc, argv);

    initializeEventTracingForTools();

#if defined(SK_BUILD_FOR_IOS)
    cd_Documents();
#endif
    SetupCrashHandler();
    if (FLAGS_runtimeCPUDetection) {
        SkGraphics::Init();
    }

    // Our benchmarks only currently decode .png or .jpg files
    SkCodecs::Register(SkPngDecoder::Decoder());
    SkCodecs::Register(SkJpegDecoder::Decoder());

    SkTaskGroup::Enabler enabled(FLAGS_threads);

    CommonFlags::SetCtxOptions(&grContextOpts);

#if defined(SK_GRAPHITE)
    CommonFlags::SetTestOptions(&gTestOptions);
#endif

    // Any shader compile failure aborts the run (see handler class above).
    NanobenchShaderErrorHandler errorHandler;
    grContextOpts.fShaderErrorHandler = &errorHandler;

    // With a fixed loop count, auto-tuning samples/frame-lag are meaningless.
    if (kAutoTuneLoops != FLAGS_loops) {
        FLAGS_samples = 1;
        FLAGS_gpuFrameLag = 0;
    }

    if (!FLAGS_writePath.isEmpty()) {
        SkDebugf("Writing files to %s.\n", FLAGS_writePath[0]);
        if (!sk_mkdir(FLAGS_writePath[0])) {
            SkDebugf("Could not create %s. Files won't be written.\n", FLAGS_writePath[0]);
            // Null out the flag so later checks treat writing as disabled.
            FLAGS_writePath.set(0, nullptr);
        }
    }

    // JSON results go to a real file only in Release builds; otherwise to a
    // null sink (or the run is refused when --outResultsFile was requested).
    std::unique_ptr<SkWStream> logStream(new SkNullWStream);
    if (!FLAGS_outResultsFile.isEmpty()) {
#if defined(SK_RELEASE)
        logStream.reset(new SkFILEWStream(FLAGS_outResultsFile[0]));
#else
        SkDebugf("I'm ignoring --outResultsFile because this is a Debug build.");
        return 1;
#endif
    }
    NanoJSONResultsWriter log(logStream.get(), SkJSONWriter::Mode::kPretty);
    log.beginObject(); // root

    // --properties and --key are flat key/value pair lists.
    if (1 == FLAGS_properties.size() % 2) {
        SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
        return 1;
    }
    for (int i = 1; i < FLAGS_properties.size(); i += 2) {
        log.appendCString(FLAGS_properties[i-1], FLAGS_properties[i]);
    }

    if (1 == FLAGS_key.size() % 2) {
        SkDebugf("ERROR: --key must be passed with an even number of arguments.\n");
        return 1;
    }
    if (FLAGS_key.size()) {
        log.beginObject("key");
        for (int i = 1; i < FLAGS_key.size(); i += 2) {
            log.appendCString(FLAGS_key[i - 1], FLAGS_key[i]);
        }
        log.endObject(); // key
    }

    // Measured once up front and subtracted out of CPU bench timings.
    const double overhead = estimate_timer_overhead();
    if (!FLAGS_quiet && !FLAGS_csv) {
        SkDebugf("Timer overhead: %s\n", HUMANIZE(overhead));
    }

    TArray<double> samples;

    // Print the column header matching whichever output mode is selected.
    if (kAutoTuneLoops != FLAGS_loops) {
        SkDebugf("Fixed number of loops; times would only be misleading so we won't print them.\n");
    } else if (FLAGS_quiet) {
        SkDebugf("! -> high variance, ? -> moderate variance\n");
        SkDebugf("    micros   \tbench\n");
    } else if (FLAGS_csv) {
        SkDebugf("min,median,mean,max,stddev,config,bench\n");
    } else if (FLAGS_ms) {
        SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\tsamples\tconfig\tbench\n");
    } else {
        SkDebugf("curr/maxrss\tloops\tmin\tmedian\tmean\tmax\tstddev\t%-*s\tconfig\tbench\n",
                 FLAGS_samples, "samples");
    }

    GrRecordingContextPriv::DMSAAStats combinedDMSAAStats;

    TArray<Config> configs;
    create_configs(&configs);

    if (FLAGS_keepAlive) {
        start_keepalive();
    }

    gSkForceRasterPipelineBlitter = FLAGS_forceRasterPipelineHP || FLAGS_forceRasterPipeline;
    gForceHighPrecisionRasterPipeline = FLAGS_forceRasterPipelineHP;

    // The SkSL memory benchmark must run before any GPU painting occurs. SkSL allocates memory for
    // its modules the first time they are accessed, and this test is trying to measure the size of
    // those allocations. If a paint has already occurred, some modules will have already been
    // loaded, so we won't be able to capture a delta for them.
    log.beginObject("results");
    RunSkSLModuleBenchmarks(&log);

    int runs = 0;
    BenchmarkStream benchStream;
    AutoreleasePool pool;
    // Outer loop: one Benchmark at a time; inner loop: every enabled config.
    while (Benchmark* b = benchStream.next()) {
        std::unique_ptr<Benchmark> bench(b);
        if (CommandLineFlags::ShouldSkip(FLAGS_match, bench->getUniqueName())) {
            continue;
        }

        if (!configs.empty()) {
            log.beginBench(
                bench->getUniqueName(), bench->getSize().width(), bench->getSize().height());
            bench->delayedSetup();
        }
        for (int i = 0; i < configs.size(); ++i) {
            Target* target = is_enabled(b, configs[i]);
            if (!target) {
                continue;
            }

            // During HWUI output this canvas may be nullptr.
            SkCanvas* canvas = target->getCanvas();
            const char* config = target->config.name.c_str();

            if (FLAGS_pre_log || FLAGS_dryRun) {
                SkDebugf("Running %s\t%s\n"
                         , bench->getUniqueName()
                         , config);
                if (FLAGS_dryRun) {
                    continue;
                }
            }

            if (FLAGS_purgeBetweenBenches) {
                SkGraphics::PurgeAllCaches();
            }

            if (FLAGS_splitPerfettoTracesByBenchmark) {
                TRACE_EVENT_API_NEW_TRACE_SECTION(TRACE_STR_COPY(bench->getUniqueName()));
            }
            TRACE_EVENT2("skia", "Benchmark", "name", TRACE_STR_COPY(bench->getUniqueName()),
                         "config", TRACE_STR_COPY(config));

            target->setup();
            bench->perCanvasPreDraw(canvas);

            // Decide how many loops of the bench constitute one timed sample;
            // GPU targets need frame timing, CPU targets use the measured
            // timer overhead.
            int maxFrameLag;
            int loops = target->needsFrameTiming(&maxFrameLag)
                ? setup_gpu_bench(target, bench.get(), maxFrameLag)
                : setup_cpu_bench(overhead, target, bench.get());

            if (kFailedLoops == loops) {
                // Can't be timed. A warning note has already been printed.
                cleanup_run(target);
                continue;
            }

            if (runs == 0 && FLAGS_ms < 1000) {
                // Run the first bench for 1000ms to warm up the nanobench if FLAGS_ms < 1000.
                // Otherwise, the first few benches' measurements will be inaccurate.
                auto stop = now_ms() + 1000;
                do {
                    time(loops, bench.get(), target);
                    pool.drain();
                } while (now_ms() < stop);
            }

            // Collect samples: either as many as fit in --ms milliseconds, or
            // exactly --samples of them.
            if (FLAGS_ms) {
                samples.clear();
                auto stop = now_ms() + FLAGS_ms;
                do {
                    samples.push_back(time(loops, bench.get(), target) / loops);
                    pool.drain();
                } while (now_ms() < stop);
            } else {
                samples.reset(FLAGS_samples);
                for (int s = 0; s < FLAGS_samples; s++) {
                    samples[s] = time(loops, bench.get(), target) / loops;
                    pool.drain();
                }
            }

            // Scale each result to the benchmark's own units, time/unit.
            for (double& sample : samples) {
                sample *= (1.0 / bench->getUnits());
            }

            TArray<SkString> keys;
            TArray<double> values;
            if (configs[i].backend == Benchmark::Backend::kGanesh) {
                if (FLAGS_gpuStatsDump) {
                    // TODO cache stats
                    bench->getGpuStats(canvas, &keys, &values);
                }
                if (FLAGS_dmsaaStatsDump && bench->getDMSAAStats(canvas->recordingContext())) {
                    const auto& dmsaaStats = canvas->recordingContext()->priv().dmsaaStats();
                    dmsaaStats.dumpKeyValuePairs(&keys, &values);
                    dmsaaStats.dump();
                    combinedDMSAAStats.merge(dmsaaStats);
                }
            }

            bench->perCanvasPostDraw(canvas);

            // Optionally dump the rendered output as a PNG under --writePath.
            if (Benchmark::Backend::kNonRendering != target->config.backend &&
                !FLAGS_writePath.isEmpty() && FLAGS_writePath[0]) {
                SkString pngFilename = SkOSPath::Join(FLAGS_writePath[0], config);
                pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getUniqueName());
                pngFilename.append(".png");
                write_canvas_png(target, pngFilename);
            }

            // Building stats.plot often shows up in profiles,
            // so skip building it when we're not going to print it anyway.
            const bool want_plot = !FLAGS_quiet && !FLAGS_ms;

            Stats stats(samples, want_plot);
            log.beginObject(config);

            log.beginObject("options");
            log.appendCString("name", bench->getName());
            benchStream.fillCurrentOptions(log);
            log.endObject(); // options

            // Metrics
            log.appendMetric("min_ms", stats.min);
            log.appendMetric("median_ms", stats.median);
            log.appendMetric("min_ratio", sk_ieee_double_divide(stats.median, stats.min));
            log.beginArray("samples");
            for (double sample : samples) {
                log.appendDoubleDigits(sample, 16);
            }
            log.endArray(); // samples
            benchStream.fillCurrentMetrics(log);
            if (!keys.empty()) {
                // dump to json, only SKPBench currently returns valid keys / values
                SkASSERT(keys.size() == values.size());
                for (int j = 0; j < keys.size(); j++) {
                    log.appendMetric(keys[j].c_str(), values[j]);
                }
            }

            log.endObject(); // config

            if (runs++ % FLAGS_flushEvery == 0) {
                log.flush();
            }

            // Console output, one line per (bench, config), format per flags.
            if (kAutoTuneLoops != FLAGS_loops) {
                if (configs.size() == 1) {
                    config = ""; // Only print the config if we run the same bench on more than one.
                }
                SkDebugf("%4d/%-4dMB\t%s\t%s "
                         , sk_tools::getCurrResidentSetSizeMB()
                         , sk_tools::getMaxResidentSetSizeMB()
                         , bench->getUniqueName()
                         , config);
                SkDebugf("\n");
            } else if (FLAGS_quiet) {
                const char* mark = " ";
                const double stddev_percent =
                        sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
                if (stddev_percent > 5) mark = "?";
                if (stddev_percent > 10) mark = "!";

                SkDebugf("%10.2f %s\t%s\t%s\n",
                         stats.median*1e3, mark, bench->getUniqueName(), config);
            } else if (FLAGS_csv) {
                const double stddev_percent =
                        sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
                SkDebugf("%g,%g,%g,%g,%g,%s,%s\n"
                         , stats.min
                         , stats.median
                         , stats.mean
                         , stats.max
                         , stddev_percent
                         , config
                         , bench->getUniqueName()
                         );
            } else {
                const double stddev_percent =
                        sk_ieee_double_divide(100 * sqrt(stats.var), stats.mean);
                SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n"
                         , sk_tools::getCurrResidentSetSizeMB()
                         , sk_tools::getMaxResidentSetSizeMB()
                         , loops
                         , HUMANIZE(stats.min)
                         , HUMANIZE(stats.median)
                         , HUMANIZE(stats.mean)
                         , HUMANIZE(stats.max)
                         , stddev_percent
                         , FLAGS_ms ? to_string(samples.size()).c_str() : stats.plot.c_str()
                         , config
                         , bench->getUniqueName()
                         );
            }

            if (FLAGS_gpuStats && Benchmark::Backend::kGanesh == configs[i].backend) {
                target->dumpStats();
            }

            if (FLAGS_verbose) {
                SkDebugf("Samples: ");
                for (int j = 0; j < samples.size(); j++) {
                    SkDebugf("%s ", HUMANIZE(samples[j]));
                }
                SkDebugf("%s\n", bench->getUniqueName());
            }
            cleanup_run(target);
            pool.drain();
        }
        if (!configs.empty()) {
            log.endBench();
        }
    }

    if (FLAGS_dmsaaStatsDump) {
        SkDebugf("<<Total Combined DMSAA Stats>>\n");
        combinedDMSAAStats.dump();
    }

    SkGraphics::PurgeAllCaches();

    // Record peak memory usage as a pseudo-benchmark in the JSON log.
    log.beginBench("memory_usage", 0, 0);
    log.beginObject("meta"); // config
    log.appendS32("max_rss_mb", sk_tools::getMaxResidentSetSizeMB());
    log.endObject(); // config
    log.endBench();

    log.endObject(); // results
    log.endObject(); // root
    log.flush();

    return 0;
}
1704