/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "BenchTimer.h"
#include "ResultsWriter.h"
#include "SkBenchLogger.h"
#include "SkBenchmark.h"
#include "SkBitmapDevice.h"
#include "SkCanvas.h"
#include "SkColorPriv.h"
#include "SkCommandLineFlags.h"
#include "SkDeferredCanvas.h"
#include "SkGraphics.h"
#include "SkImageEncoder.h"
#include "SkOSFile.h"
#include "SkPicture.h"
#include "SkString.h"

#if SK_SUPPORT_GPU
#include "GrContext.h"
#include "GrContextFactory.h"
#include "GrRenderTarget.h"
#include "SkGpuDevice.h"
#include "gl/GrGLDefines.h"
#else
class GrContext;
#endif // SK_SUPPORT_GPU

#include <cstdio>   // for ::remove()
#include <limits>
// Drawing modes selectable with --mode. BenchMode_Name is indexed by this
// enum when parsing the flag, so the two must stay in sync.
enum BenchMode {
    kNormal_BenchMode,
    kDeferred_BenchMode,
    kDeferredSilent_BenchMode,
    kRecord_BenchMode,
    kPictureRecord_BenchMode
};
const char* BenchMode_Name[] = {
    "normal", "deferred", "deferredSilent", "record", "picturerecord"
};

// Sentinel --config value meaning "run every config marked runByDefault".
static const char kDefaultsConfigStr[] = "defaults";

///////////////////////////////////////////////////////////////////////////////

erase(SkBitmap & bm)50 static void erase(SkBitmap& bm) {
51 if (bm.config() == SkBitmap::kA8_Config) {
52 bm.eraseColor(SK_ColorTRANSPARENT);
53 } else {
54 bm.eraseColor(SK_ColorWHITE);
55 }
56 }
57
58 class Iter {
59 public:
Iter()60 Iter() : fBench(BenchRegistry::Head()) {}
61
next()62 SkBenchmark* next() {
63 if (fBench) {
64 BenchRegistry::Factory f = fBench->factory();
65 fBench = fBench->next();
66 return f();
67 }
68 return NULL;
69 }
70
71 private:
72 const BenchRegistry* fBench;
73 };
74
75 class AutoPrePostDraw {
76 public:
AutoPrePostDraw(SkBenchmark * bench)77 AutoPrePostDraw(SkBenchmark* bench) : fBench(bench) {
78 fBench->preDraw();
79 }
~AutoPrePostDraw()80 ~AutoPrePostDraw() {
81 fBench->postDraw();
82 }
83 private:
84 SkBenchmark* fBench;
85 };
86
make_filename(const char name[],SkString * path)87 static void make_filename(const char name[], SkString* path) {
88 path->set(name);
89 for (int i = 0; name[i]; i++) {
90 switch (name[i]) {
91 case '/':
92 case '\\':
93 case ' ':
94 case ':':
95 path->writable_str()[i] = '-';
96 break;
97 default:
98 break;
99 }
100 }
101 }
102
saveFile(const char name[],const char config[],const char dir[],const SkBitmap & bm)103 static void saveFile(const char name[], const char config[], const char dir[],
104 const SkBitmap& bm) {
105 SkBitmap copy;
106 if (!bm.copyTo(©, SkBitmap::kARGB_8888_Config)) {
107 return;
108 }
109
110 if (bm.config() == SkBitmap::kA8_Config) {
111 // turn alpha into gray-scale
112 size_t size = copy.getSize() >> 2;
113 SkPMColor* p = copy.getAddr32(0, 0);
114 for (size_t i = 0; i < size; i++) {
115 int c = (*p >> SK_A32_SHIFT) & 0xFF;
116 c = 255 - c;
117 c |= (c << 24) | (c << 16) | (c << 8);
118 *p++ = c | (SK_A32_MASK << SK_A32_SHIFT);
119 }
120 }
121
122 SkString filename;
123 make_filename(name, &filename);
124 filename.appendf("_%s.png", config);
125 SkString path = SkOSPath::SkPathJoin(dir, filename.c_str());
126 ::remove(path.c_str());
127 SkImageEncoder::EncodeFile(path.c_str(), copy, SkImageEncoder::kPNG_Type, 100);
128 }
129
performClip(SkCanvas * canvas,int w,int h)130 static void performClip(SkCanvas* canvas, int w, int h) {
131 SkRect r;
132
133 r.set(SkIntToScalar(10), SkIntToScalar(10),
134 SkIntToScalar(w*2/3), SkIntToScalar(h*2/3));
135 canvas->clipRect(r, SkRegion::kIntersect_Op);
136
137 r.set(SkIntToScalar(w/3), SkIntToScalar(h/3),
138 SkIntToScalar(w-10), SkIntToScalar(h-10));
139 canvas->clipRect(r, SkRegion::kXOR_Op);
140 }
141
performRotate(SkCanvas * canvas,int w,int h)142 static void performRotate(SkCanvas* canvas, int w, int h) {
143 const SkScalar x = SkIntToScalar(w) / 2;
144 const SkScalar y = SkIntToScalar(h) / 2;
145
146 canvas->translate(x, y);
147 canvas->rotate(SkIntToScalar(35));
148 canvas->translate(-x, -y);
149 }
150
performScale(SkCanvas * canvas,int w,int h)151 static void performScale(SkCanvas* canvas, int w, int h) {
152 const SkScalar x = SkIntToScalar(w) / 2;
153 const SkScalar y = SkIntToScalar(h) / 2;
154
155 canvas->translate(x, y);
156 // just enough so we can't take the sprite case
157 canvas->scale(SK_Scalar1 * 99/100, SK_Scalar1 * 99/100);
158 canvas->translate(-x, -y);
159 }
160
make_device(SkBitmap::Config config,const SkIPoint & size,SkBenchmark::Backend backend,int sampleCount,GrContext * context)161 static SkBaseDevice* make_device(SkBitmap::Config config, const SkIPoint& size,
162 SkBenchmark::Backend backend, int sampleCount, GrContext* context) {
163 SkBaseDevice* device = NULL;
164 SkBitmap bitmap;
165 bitmap.setConfig(config, size.fX, size.fY);
166
167 switch (backend) {
168 case SkBenchmark::kRaster_Backend:
169 bitmap.allocPixels();
170 erase(bitmap);
171 device = SkNEW_ARGS(SkBitmapDevice, (bitmap));
172 break;
173 #if SK_SUPPORT_GPU
174 case SkBenchmark::kGPU_Backend: {
175 GrTextureDesc desc;
176 desc.fConfig = kSkia8888_GrPixelConfig;
177 desc.fFlags = kRenderTarget_GrTextureFlagBit;
178 desc.fWidth = size.fX;
179 desc.fHeight = size.fY;
180 desc.fSampleCnt = sampleCount;
181 SkAutoTUnref<GrTexture> texture(context->createUncachedTexture(desc, NULL, 0));
182 if (!texture) {
183 return NULL;
184 }
185 device = SkNEW_ARGS(SkGpuDevice, (context, texture.get()));
186 break;
187 }
188 #endif
189 case SkBenchmark::kPDF_Backend:
190 default:
191 SkDEBUGFAIL("unsupported");
192 }
193 return device;
194 }
195
196 #if SK_SUPPORT_GPU
197 GrContextFactory gContextFactory;
198 typedef GrContextFactory::GLContextType GLContextType;
199 static const GLContextType kNative = GrContextFactory::kNative_GLContextType;
200 #if SK_ANGLE
201 static const GLContextType kANGLE = GrContextFactory::kANGLE_GLContextType;
202 #endif
203 static const GLContextType kDebug = GrContextFactory::kDebug_GLContextType;
204 static const GLContextType kNull = GrContextFactory::kNull_GLContextType;
205 #else
206 typedef int GLContextType;
207 static const GLContextType kNative = 0, kANGLE = 0, kDebug = 0, kNull = 0;
208 #endif
209
210 #ifdef SK_DEBUG
211 static const bool kIsDebug = true;
212 #else
213 static const bool kIsDebug = false;
214 #endif
215
216 static const struct Config {
217 SkBitmap::Config config;
218 const char* name;
219 int sampleCount;
220 SkBenchmark::Backend backend;
221 GLContextType contextType;
222 bool runByDefault;
223 } gConfigs[] = {
224 { SkBitmap::kNo_Config, "NONRENDERING", 0, SkBenchmark::kNonRendering_Backend, kNative, true},
225 { SkBitmap::kARGB_8888_Config, "8888", 0, SkBenchmark::kRaster_Backend, kNative, true},
226 { SkBitmap::kRGB_565_Config, "565", 0, SkBenchmark::kRaster_Backend, kNative, true},
227 #if SK_SUPPORT_GPU
228 { SkBitmap::kARGB_8888_Config, "GPU", 0, SkBenchmark::kGPU_Backend, kNative, true},
229 { SkBitmap::kARGB_8888_Config, "MSAA4", 4, SkBenchmark::kGPU_Backend, kNative, false},
230 { SkBitmap::kARGB_8888_Config, "MSAA16", 16, SkBenchmark::kGPU_Backend, kNative, false},
231 #if SK_ANGLE
232 { SkBitmap::kARGB_8888_Config, "ANGLE", 0, SkBenchmark::kGPU_Backend, kANGLE, true},
233 #endif // SK_ANGLE
234 { SkBitmap::kARGB_8888_Config, "Debug", 0, SkBenchmark::kGPU_Backend, kDebug, kIsDebug},
235 { SkBitmap::kARGB_8888_Config, "NULLGPU", 0, SkBenchmark::kGPU_Backend, kNull, true},
236 #endif // SK_SUPPORT_GPU
237 };
238
239 DEFINE_string(outDir, "", "If given, image of each bench will be put in outDir.");
240 DEFINE_string(timers, "cg", "Timers to display. "
241 "Options: w(all) W(all, truncated) c(pu) C(pu, truncated) g(pu)");
242
243 DEFINE_bool(rotate, false, "Rotate canvas before bench run?");
244 DEFINE_bool(scale, false, "Scale canvas before bench run?");
245 DEFINE_bool(clip, false, "Clip canvas before bench run?");
246
247 DEFINE_bool(forceAA, true, "Force anti-aliasing?");
248 DEFINE_bool(forceFilter, false, "Force bitmap filtering?");
249 DEFINE_string(forceDither, "default", "Force dithering: true, false, or default?");
250 DEFINE_bool(forceBlend, false, "Force alpha blending?");
251
252 DEFINE_int32(gpuCacheBytes, -1, "GPU cache size limit in bytes. 0 to disable cache.");
253 DEFINE_int32(gpuCacheCount, -1, "GPU cache size limit in object count. 0 to disable cache.");
254
255 DEFINE_string(match, "", "[~][^]substring[$] [...] of test name to run.\n"
256 "Multiple matches may be separated by spaces.\n"
257 "~ causes a matching test to always be skipped\n"
258 "^ requires the start of the test to match\n"
259 "$ requires the end of the test to match\n"
260 "^ and $ requires an exact match\n"
261 "If a test does not match any list entry,\n"
262 "it is skipped unless some list entry starts with ~\n");
263 DEFINE_string(mode, "normal",
264 "normal: draw to a normal canvas;\n"
265 "deferred: draw to a deferred canvas;\n"
266 "deferredSilent: deferred with silent playback;\n"
267 "record: draw to an SkPicture;\n"
268 "picturerecord: draw from an SkPicture to an SkPicture.\n");
269 DEFINE_string(config, kDefaultsConfigStr,
270 "Run configs given. By default, runs the configs marked \"runByDefault\" in gConfigs.");
271 DEFINE_string(logFile, "", "Also write stdout here.");
272 DEFINE_int32(minMs, 20, "Shortest time we'll allow a benchmark to run.");
273 DEFINE_int32(maxMs, 4000, "Longest time we'll allow a benchmark to run.");
274 DEFINE_double(error, 0.01,
275 "Ratio of subsequent bench measurements must drop within 1±error to converge.");
276 DEFINE_string(timeFormat, "%9.2f", "Format to print results, in milliseconds per 1000 loops.");
277 DEFINE_bool2(verbose, v, false, "Print more.");
278 DEFINE_string2(resourcePath, i, NULL, "directory for test resources.");
279 DEFINE_string(outResultsFile, "", "If given, the results will be written to the file in JSON format.");
280
281 // Has this bench converged? First arguments are milliseconds / loop iteration,
282 // last is overall runtime in milliseconds.
HasConverged(double prevPerLoop,double currPerLoop,double currRaw)283 static bool HasConverged(double prevPerLoop, double currPerLoop, double currRaw) {
284 if (currRaw < FLAGS_minMs) {
285 return false;
286 }
287 const double low = 1 - FLAGS_error, high = 1 + FLAGS_error;
288 const double ratio = currPerLoop / prevPerLoop;
289 return low < ratio && ratio < high;
290 }
291
292 int tool_main(int argc, char** argv);
tool_main(int argc,char ** argv)293 int tool_main(int argc, char** argv) {
294 #if SK_ENABLE_INST_COUNT
295 gPrintInstCount = true;
296 #endif
297 SkAutoGraphics ag;
298 SkCommandLineFlags::Parse(argc, argv);
299
300 // First, parse some flags.
301 SkBenchLogger logger;
302 if (FLAGS_logFile.count()) {
303 logger.SetLogFile(FLAGS_logFile[0]);
304 }
305
306 LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
307 MultiResultsWriter writer;
308 writer.add(&logWriter);
309 SkAutoTDelete<JSONResultsWriter> jsonWriter;
310 if (FLAGS_outResultsFile.count()) {
311 jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
312 writer.add(jsonWriter.get());
313 }
314 // Instantiate after all the writers have been added to writer so that we
315 // call close() before their destructors are called on the way out.
316 CallEnd<MultiResultsWriter> ender(writer);
317
318 const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
319 SkTriState::State dither = SkTriState::kDefault;
320 for (size_t i = 0; i < 3; i++) {
321 if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
322 dither = static_cast<SkTriState::State>(i);
323 }
324 }
325
326 BenchMode benchMode = kNormal_BenchMode;
327 for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
328 if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
329 benchMode = static_cast<BenchMode>(i);
330 }
331 }
332
333 SkTDArray<int> configs;
334 bool runDefaultConfigs = false;
335 // Try user-given configs first.
336 for (int i = 0; i < FLAGS_config.count(); i++) {
337 for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
338 if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
339 *configs.append() = j;
340 } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
341 runDefaultConfigs = true;
342 }
343 }
344 }
345 // If there weren't any, fill in with defaults.
346 if (runDefaultConfigs) {
347 for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
348 if (gConfigs[i].runByDefault) {
349 *configs.append() = i;
350 }
351 }
352 }
353 // Filter out things we can't run.
354 if (kNormal_BenchMode != benchMode) {
355 // Non-rendering configs only run in normal mode
356 for (int i = 0; i < configs.count(); ++i) {
357 const Config& config = gConfigs[configs[i]];
358 if (SkBenchmark::kNonRendering_Backend == config.backend) {
359 configs.remove(i, 1);
360 --i;
361 }
362 }
363 }
364 // Set the resource path.
365 if (!FLAGS_resourcePath.isEmpty()) {
366 SkBenchmark::SetResourcePath(FLAGS_resourcePath[0]);
367 }
368
369 #if SK_SUPPORT_GPU
370 for (int i = 0; i < configs.count(); ++i) {
371 const Config& config = gConfigs[configs[i]];
372
373 if (SkBenchmark::kGPU_Backend == config.backend) {
374 GrContext* context = gContextFactory.get(config.contextType);
375 if (NULL == context) {
376 logger.logError(SkStringPrintf(
377 "Error creating GrContext for config %s. Config will be skipped.\n",
378 config.name));
379 configs.remove(i);
380 --i;
381 continue;
382 }
383 if (config.sampleCount > context->getMaxSampleCount()){
384 logger.logError(SkStringPrintf(
385 "Sample count (%d) for config %s is unsupported. Config will be skipped.\n",
386 config.sampleCount, config.name));
387 configs.remove(i);
388 --i;
389 continue;
390 }
391 }
392 }
393 #endif
394
395 // All flags should be parsed now. Report our settings.
396 if (kIsDebug) {
397 logger.logError("bench was built in Debug mode, so we're going to hide the times."
398 " It's for your own good!\n");
399 }
400 writer.option("mode", FLAGS_mode[0]);
401 writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
402 writer.option("antialias", SkStringPrintf("%d", FLAGS_forceAA).c_str());
403 writer.option("filter", SkStringPrintf("%d", FLAGS_forceFilter).c_str());
404 writer.option("dither", SkTriState::Name[dither]);
405
406 writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
407 writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
408 writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());
409
410 #if defined(SK_SCALAR_IS_FIXED)
411 writer.option("scalar", "fixed");
412 #else
413 writer.option("scalar", "float");
414 #endif
415
416 #if defined(SK_BUILD_FOR_WIN32)
417 writer.option("system", "WIN32");
418 #elif defined(SK_BUILD_FOR_MAC)
419 writer.option("system", "MAC");
420 #elif defined(SK_BUILD_FOR_ANDROID)
421 writer.option("system", "ANDROID");
422 #elif defined(SK_BUILD_FOR_UNIX)
423 writer.option("system", "UNIX");
424 #else
425 writer.option("system", "other");
426 #endif
427
428 #if defined(SK_DEBUG)
429 writer.option("build", "DEBUG");
430 #else
431 writer.option("build", "RELEASE");
432 #endif
433
434 // Set texture cache limits if non-default.
435 for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
436 #if SK_SUPPORT_GPU
437 const Config& config = gConfigs[i];
438 if (SkBenchmark::kGPU_Backend != config.backend) {
439 continue;
440 }
441 GrContext* context = gContextFactory.get(config.contextType);
442 if (NULL == context) {
443 continue;
444 }
445
446 size_t bytes;
447 int count;
448 context->getTextureCacheLimits(&count, &bytes);
449 if (-1 != FLAGS_gpuCacheBytes) {
450 bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
451 }
452 if (-1 != FLAGS_gpuCacheCount) {
453 count = FLAGS_gpuCacheCount;
454 }
455 context->setTextureCacheLimits(count, bytes);
456 #endif
457 }
458
459 // Run each bench in each configuration it supports and we asked for.
460 Iter iter;
461 SkBenchmark* bench;
462 while ((bench = iter.next()) != NULL) {
463 SkAutoTUnref<SkBenchmark> benchUnref(bench);
464 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
465 continue;
466 }
467
468 bench->setForceAlpha(alpha);
469 bench->setForceAA(FLAGS_forceAA);
470 bench->setForceFilter(FLAGS_forceFilter);
471 bench->setDither(dither);
472 AutoPrePostDraw appd(bench);
473
474 bool loggedBenchName = false;
475 for (int i = 0; i < configs.count(); ++i) {
476 const int configIndex = configs[i];
477 const Config& config = gConfigs[configIndex];
478
479 if (!bench->isSuitableFor(config.backend)) {
480 continue;
481 }
482
483 GrContext* context = NULL;
484 #if SK_SUPPORT_GPU
485 SkGLContextHelper* glContext = NULL;
486 if (SkBenchmark::kGPU_Backend == config.backend) {
487 context = gContextFactory.get(config.contextType);
488 if (NULL == context) {
489 continue;
490 }
491 glContext = gContextFactory.getGLContext(config.contextType);
492 }
493 #endif
494 SkAutoTUnref<SkBaseDevice> device;
495 SkAutoTUnref<SkCanvas> canvas;
496 SkPicture recordFrom, recordTo;
497 const SkIPoint dim = bench->getSize();
498
499 const SkPicture::RecordingFlags kRecordFlags =
500 SkPicture::kUsePathBoundsForClip_RecordingFlag;
501
502 if (SkBenchmark::kNonRendering_Backend != config.backend) {
503 device.reset(make_device(config.config,
504 dim,
505 config.backend,
506 config.sampleCount,
507 context));
508 if (!device.get()) {
509 logger.logError(SkStringPrintf(
510 "Device creation failure for config %s. Will skip.\n", config.name));
511 continue;
512 }
513
514 switch(benchMode) {
515 case kDeferredSilent_BenchMode:
516 case kDeferred_BenchMode:
517 canvas.reset(SkDeferredCanvas::Create(device.get()));
518 break;
519 case kRecord_BenchMode:
520 canvas.reset(SkRef(recordTo.beginRecording(dim.fX, dim.fY, kRecordFlags)));
521 break;
522 case kPictureRecord_BenchMode:
523 bench->draw(1, recordFrom.beginRecording(dim.fX, dim.fY, kRecordFlags));
524 recordFrom.endRecording();
525 canvas.reset(SkRef(recordTo.beginRecording(dim.fX, dim.fY, kRecordFlags)));
526 break;
527 case kNormal_BenchMode:
528 canvas.reset(new SkCanvas(device.get()));
529 break;
530 default:
531 SkASSERT(false);
532 }
533 }
534
535 if (NULL != canvas) {
536 canvas->clear(SK_ColorWHITE);
537 if (FLAGS_clip) { performClip(canvas, dim.fX, dim.fY); }
538 if (FLAGS_scale) { performScale(canvas, dim.fX, dim.fY); }
539 if (FLAGS_rotate) { performRotate(canvas, dim.fX, dim.fY); }
540 }
541
542 if (!loggedBenchName) {
543 loggedBenchName = true;
544 writer.bench(bench->getName(), dim.fX, dim.fY);
545 }
546
547 #if SK_SUPPORT_GPU
548 SkGLContextHelper* contextHelper = NULL;
549 if (SkBenchmark::kGPU_Backend == config.backend) {
550 contextHelper = gContextFactory.getGLContext(config.contextType);
551 }
552 BenchTimer timer(contextHelper);
553 #else
554 BenchTimer timer;
555 #endif
556
557 double previous = std::numeric_limits<double>::infinity();
558 bool converged = false;
559
560 // variables used to compute loopsPerFrame
561 double frameIntervalTime = 0.0f;
562 int frameIntervalTotalLoops = 0;
563
564 bool frameIntervalComputed = false;
565 int loopsPerFrame = 0;
566 int loopsPerIter = 0;
567 if (FLAGS_verbose) { SkDebugf("%s %s: ", bench->getName(), config.name); }
568 do {
569 // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
570 loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
571 if (loopsPerIter >= (1<<30) || timer.fWall > FLAGS_maxMs) {
572 // If you find it takes more than a billion loops to get up to 20ms of runtime,
573 // you've got a computer clocked at several THz or have a broken benchmark. ;)
574 // "1B ought to be enough for anybody."
575 logger.logError(SkStringPrintf(
576 "\nCan't get %s %s to converge in %dms (%d loops)",
577 bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
578 break;
579 }
580
581 if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
582 // Clear the recorded commands so that they do not accumulate.
583 canvas.reset(recordTo.beginRecording(dim.fX, dim.fY, kRecordFlags));
584 }
585
586 timer.start();
587 // Inner loop that allows us to break the run into smaller
588 // chunks (e.g. frames). This is especially useful for the GPU
589 // as we can flush and/or swap buffers to keep the GPU from
590 // queuing up too much work.
591 for (int loopCount = loopsPerIter; loopCount > 0; ) {
592 // Save and restore around each call to draw() to guarantee a pristine canvas.
593 SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);
594
595 int loops;
596 if (frameIntervalComputed && loopCount > loopsPerFrame) {
597 loops = loopsPerFrame;
598 loopCount -= loopsPerFrame;
599 } else {
600 loops = loopCount;
601 loopCount = 0;
602 }
603
604 if (benchMode == kPictureRecord_BenchMode) {
605 recordFrom.draw(canvas);
606 } else {
607 bench->draw(loops, canvas);
608 }
609
610 if (kDeferredSilent_BenchMode == benchMode) {
611 static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
612 } else if (NULL != canvas) {
613 canvas->flush();
614 }
615
616 #if SK_SUPPORT_GPU
617 // swap drawing buffers on each frame to prevent the GPU
618 // from queuing up too much work
619 if (NULL != glContext) {
620 glContext->swapBuffers();
621 }
622 #endif
623 }
624
625
626
627 // Stop truncated timers before GL calls complete, and stop the full timers after.
628 timer.truncatedEnd();
629 #if SK_SUPPORT_GPU
630 if (NULL != glContext) {
631 context->flush();
632 SK_GL(*glContext, Finish());
633 }
634 #endif
635 timer.end();
636
637 // setup the frame interval for subsequent iterations
638 if (!frameIntervalComputed) {
639 frameIntervalTime += timer.fWall;
640 frameIntervalTotalLoops += loopsPerIter;
641 if (frameIntervalTime >= FLAGS_minMs) {
642 frameIntervalComputed = true;
643 loopsPerFrame =
644 (int)(((double)frameIntervalTotalLoops / frameIntervalTime) * FLAGS_minMs);
645 if (loopsPerFrame < 1) {
646 loopsPerFrame = 1;
647 }
648 // SkDebugf(" %s has %d loops in %f ms (normalized to %d)\n",
649 // bench->getName(), frameIntervalTotalLoops,
650 // timer.fWall, loopsPerFrame);
651 }
652 }
653
654 const double current = timer.fWall / loopsPerIter;
655 if (FLAGS_verbose && current > previous) { SkDebugf("↑"); }
656 if (FLAGS_verbose) { SkDebugf("%.3g ", current); }
657 converged = HasConverged(previous, current, timer.fWall);
658 previous = current;
659 } while (!kIsDebug && !converged);
660 if (FLAGS_verbose) { SkDebugf("\n"); }
661
662 if (FLAGS_outDir.count() && SkBenchmark::kNonRendering_Backend != config.backend) {
663 saveFile(bench->getName(),
664 config.name,
665 FLAGS_outDir[0],
666 device->accessBitmap(false));
667 }
668
669 if (kIsDebug) {
670 // Let's not mislead ourselves by looking at Debug build bench times!
671 continue;
672 }
673
674 // Normalize to ms per 1000 iterations.
675 const double normalize = 1000.0 / loopsPerIter;
676 const struct { char shortName; const char* longName; double ms; } times[] = {
677 {'w', "msecs", normalize * timer.fWall},
678 {'W', "Wmsecs", normalize * timer.fTruncatedWall},
679 {'c', "cmsecs", normalize * timer.fCpu},
680 {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
681 {'g', "gmsecs", normalize * timer.fGpu},
682 };
683
684 writer.config(config.name);
685 for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
686 if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
687 writer.timer(times[i].longName, times[i].ms);
688 }
689 }
690 }
691 }
692 #if SK_SUPPORT_GPU
693 gContextFactory.destroyContexts();
694 #endif
695 return 0;
696 }
697
698 #if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL)
main(int argc,char * const argv[])699 int main(int argc, char * const argv[]) {
700 return tool_main(argc, (char**) argv);
701 }
702 #endif
703