/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkExecutor.h"
#include "include/gpu/GrContextOptions.h"
#include "tools/flags/CommonFlags.h"

DEFINE_int(gpuThreads,
           2,
           "Create this many extra threads to assist with GPU work, "
           "including software path rendering. Defaults to two.");

extern bool gSkBlobAsSlugTesting;

namespace CommonFlags {

static DEFINE_bool(cachePathMasks, true,
                   "Allows path mask textures to be cached in GPU configs.");
static DEFINE_bool(failFlushTimeCallbacks, false,
                   "Causes all flush-time callbacks to fail.");
static DEFINE_bool(allPathsVolatile, false,
                   "Causes all GPU paths to be processed as if 'setIsVolatile' had been called.");
static DEFINE_string(pr, "",
                     "Set of enabled gpu path renderers. Defined as a list of: "
                     "[~]none [~]dashline [~]aahairline [~]aaconvex [~]aalinearizing [~]small [~]tri "
                     "[~]atlas [~]tess [~]default");

static DEFINE_int(internalSamples, -1,
                  "Number of samples for internal draws that use MSAA, or default value if negative.");

static DEFINE_int(maxAtlasSize, -1,
                  "Maximum width and height of internal texture atlases, or default value if negative.");

static DEFINE_bool(disableDriverCorrectnessWorkarounds, false,
                   "Disables all GPU driver correctness workarounds");

static DEFINE_bool(dontReduceOpsTaskSplitting, false,
                   "Don't reorder tasks to reduce render passes");

static DEFINE_int(gpuResourceCacheLimit, -1,
                  "Maximum number of bytes to use for budgeted GPU resources. "
                  "Default is -1, which means GrResourceCache::kDefaultMaxSize.");

static DEFINE_bool(allowMSAAOnNewIntel, false,
                   "Allows MSAA to be enabled on newer intel GPUs.");

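// Maps one name from the --pr flag to its GpuPathRenderers bit. Aborts on an
// unrecognized name so that typos fail loudly rather than being silently ignored.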
static GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
    if (!strcmp(name, "none")) {
        return GpuPathRenderers::kNone;
    } else if (!strcmp(name, "dashline")) {
        return GpuPathRenderers::kDashLine;
    } else if (!strcmp(name, "aahairline")) {
        return GpuPathRenderers::kAAHairline;
    } else if (!strcmp(name, "aaconvex")) {
        return GpuPathRenderers::kAAConvex;
    } else if (!strcmp(name, "aalinearizing")) {
        return GpuPathRenderers::kAALinearizing;
    } else if (!strcmp(name, "small")) {
        return GpuPathRenderers::kSmall;
    } else if (!strcmp(name, "tri")) {
        return GpuPathRenderers::kTriangulating;
    } else if (!strcmp(name, "atlas")) {
        return GpuPathRenderers::kAtlas;
    } else if (!strcmp(name, "tess")) {
        return GpuPathRenderers::kTessellation;
    } else if (!strcmp(name, "default")) {
        return GpuPathRenderers::kDefault;
    }
    SK_ABORT("error: unknown named path renderer \"%s\"\n", name);
}

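// Builds the GpuPathRenderers bit set from the --pr flag. If the first entry is
// negated ('~'), we start from the default set and subtract; otherwise we start
// from the empty set and add. For example, "--pr ~small tess" takes the defaults,
// disables the small-path renderer, and enables tessellation.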
static GpuPathRenderers collect_gpu_path_renderers_from_flags() {
    if (FLAGS_pr.isEmpty()) {
        return GpuPathRenderers::kDefault;
    }

    GpuPathRenderers gpuPathRenderers = ('~' == FLAGS_pr[0][0])
            ? GpuPathRenderers::kDefault
            : GpuPathRenderers::kNone;

    for (int i = 0; i < FLAGS_pr.size(); ++i) {
        const char* name = FLAGS_pr[i];
        if (name[0] == '~') {
            gpuPathRenderers &= ~get_named_pathrenderers_flags(&name[1]);
        } else {
            gpuPathRenderers |= get_named_pathrenderers_flags(name);
        }
    }
    return gpuPathRenderers;
}

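// Applies all of the flags above to a GrContextOptions before context creation.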
void SetCtxOptions(GrContextOptions* ctxOptions) {
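    // Function-local static: the FIFO thread pool is created once, on the first
    // call, and shared by every context configured through this helper.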
    static std::unique_ptr<SkExecutor> gGpuExecutor = (0 != FLAGS_gpuThreads)
            ? SkExecutor::MakeFIFOThreadPool(FLAGS_gpuThreads)
            : nullptr;

    ctxOptions->fExecutor = gGpuExecutor.get();
    ctxOptions->fAllowPathMaskCaching = FLAGS_cachePathMasks;
    ctxOptions->fFailFlushTimeCallbacks = FLAGS_failFlushTimeCallbacks;
    ctxOptions->fAllPathsVolatile = FLAGS_allPathsVolatile;
    ctxOptions->fGpuPathRenderers = collect_gpu_path_renderers_from_flags();
    ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;
    ctxOptions->fResourceCacheLimitOverride = FLAGS_gpuResourceCacheLimit;
    // If testing with slugs, ensure that padding is added in the atlas.
    ctxOptions->fSupportBilerpFromGlyphAtlas |= gSkBlobAsSlugTesting;

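    // For both overrides below, a negative flag value means "keep the backend's
    // default", so the corresponding option is left untouched.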
    if (FLAGS_internalSamples >= 0) {
        ctxOptions->fInternalMultisampleCount = FLAGS_internalSamples;
    }
    if (FLAGS_maxAtlasSize >= 0) {
        ctxOptions->fMaxTextureAtlasSize = FLAGS_maxAtlasSize;
    }

    if (FLAGS_dontReduceOpsTaskSplitting) {
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kNo;
    } else {
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kYes;
    }
    ctxOptions->fAllowMSAAOnNewIntel = FLAGS_allowMSAAOnNewIntel;
}

}  // namespace CommonFlags