/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkExecutor.h"
#include "include/gpu/GrContextOptions.h"
#include "tools/flags/CommonFlags.h"

#include <cstring>  // strcmp
#include <memory>   // std::unique_ptr

DEFINE_int(gpuThreads,
           2,
           "Create this many extra threads to assist with GPU work, "
           "including software path rendering. Defaults to two.");

namespace CommonFlags {

static DEFINE_bool(cachePathMasks, true,
                   "Allows path mask textures to be cached in GPU configs.");
static DEFINE_bool(allPathsVolatile, false,
                   "Causes all GPU paths to be processed as if 'setIsVolatile' had been called.");

static DEFINE_bool(hwtess, false, "Enables support for tessellation shaders (if hw allows).");

static DEFINE_int(maxTessellationSegments, 0,
                  "Overrides the max number of tessellation segments supported by the caps.");

static DEFINE_bool(alwaysHwTess, false,
        "Always try to use hardware tessellation, regardless of how small a path may be.");

static DEFINE_string(pr, "",
              "Set of enabled gpu path renderers. Defined as a list of: "
              "[~]none [~]dashline [~]aahairline [~]aaconvex [~]aalinearizing [~]small [~]tri "
              "[~]atlas [~]tess [~]all");
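// Illustrative invocations (hypothetical command lines, derived from the parsing
// logic in collect_gpu_path_renderers_from_flags() below):
//   --pr tess atlas   enable only the tessellation and atlas path renderers
//   --pr ~small       start from the default set and disable the small-path renderer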

static DEFINE_int(internalSamples, -1,
        "Number of samples for internal draws that use MSAA, or default value if negative.");

static DEFINE_int(maxAtlasSize, -1,
        "Maximum width and height of internal texture atlases, or default value if negative.");

static DEFINE_bool(disableDriverCorrectnessWorkarounds, false,
                   "Disables all GPU driver correctness workarounds");

static DEFINE_bool(dontReduceOpsTaskSplitting, false,
                   "Don't reorder tasks to reduce render passes");

static DEFINE_int(gpuResourceCacheLimit, -1,
                  "Maximum number of bytes to use for budgeted GPU resources. "
                  "Default is -1, which means GrResourceCache::kDefaultMaxSize.");

static GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
    if (!strcmp(name, "none")) {
        return GpuPathRenderers::kNone;
    } else if (!strcmp(name, "dashline")) {
        return GpuPathRenderers::kDashLine;
    } else if (!strcmp(name, "aahairline")) {
        return GpuPathRenderers::kAAHairline;
    } else if (!strcmp(name, "aaconvex")) {
        return GpuPathRenderers::kAAConvex;
    } else if (!strcmp(name, "aalinearizing")) {
        return GpuPathRenderers::kAALinearizing;
    } else if (!strcmp(name, "small")) {
        return GpuPathRenderers::kSmall;
    } else if (!strcmp(name, "tri")) {
        return GpuPathRenderers::kTriangulating;
    } else if (!strcmp(name, "atlas")) {
        return GpuPathRenderers::kAtlas;
    } else if (!strcmp(name, "tess")) {
        return GpuPathRenderers::kTessellation;
    } else if (!strcmp(name, "default")) {
        return GpuPathRenderers::kDefault;
    }
    SK_ABORT("error: unknown named path renderer \"%s\"\n", name);
}

static GpuPathRenderers collect_gpu_path_renderers_from_flags() {
    if (FLAGS_pr.isEmpty()) {
        return GpuPathRenderers::kDefault;
    }

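    // If the first entry is negated ('~'), seed with the default renderer set and
    // subtract from it; otherwise start empty and accumulate only the named renderers.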
    GpuPathRenderers gpuPathRenderers = ('~' == FLAGS_pr[0][0])
            ? GpuPathRenderers::kDefault
            : GpuPathRenderers::kNone;

    for (int i = 0; i < FLAGS_pr.count(); ++i) {
        const char* name = FLAGS_pr[i];
        if (name[0] == '~') {
            gpuPathRenderers &= ~get_named_pathrenderers_flags(&name[1]);
        } else {
            gpuPathRenderers |= get_named_pathrenderers_flags(name);
        }
    }
    return gpuPathRenderers;
}

void SetCtxOptions(GrContextOptions* ctxOptions) {
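    // Function-local static: the thread pool is created once on first use and is
    // shared by every context configured through this helper. With --gpuThreads 0
    // the executor is null and that work runs serially on the calling thread.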
    static std::unique_ptr<SkExecutor> gGpuExecutor = (0 != FLAGS_gpuThreads)
        ? SkExecutor::MakeFIFOThreadPool(FLAGS_gpuThreads)
        : nullptr;

    ctxOptions->fExecutor                            = gGpuExecutor.get();
    ctxOptions->fAllowPathMaskCaching                = FLAGS_cachePathMasks;
    ctxOptions->fAllPathsVolatile                    = FLAGS_allPathsVolatile;
    ctxOptions->fEnableExperimentalHardwareTessellation = FLAGS_hwtess;
    ctxOptions->fMaxTessellationSegmentsOverride     = FLAGS_maxTessellationSegments;
    ctxOptions->fAlwaysPreferHardwareTessellation    = FLAGS_alwaysHwTess;
    ctxOptions->fGpuPathRenderers                    = collect_gpu_path_renderers_from_flags();
    ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;
    ctxOptions->fResourceCacheLimitOverride          = FLAGS_gpuResourceCacheLimit;

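    // A negative flag value means "leave the context's built-in default in place".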
    if (FLAGS_internalSamples >= 0) {
        ctxOptions->fInternalMultisampleCount = FLAGS_internalSamples;
    }
    if (FLAGS_maxAtlasSize >= 0) {
        ctxOptions->fMaxTextureAtlasSize = FLAGS_maxAtlasSize;
    }

    if (FLAGS_dontReduceOpsTaskSplitting) {
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kNo;
    } else {
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kYes;
    }
}

}  // namespace CommonFlags
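
// A minimal usage sketch (hypothetical caller, not part of this file): a test
// harness would fill in a GrContextOptions from the parsed flags before creating
// a context, e.g.
//
//   GrContextOptions options;
//   CommonFlags::SetCtxOptions(&options);
//   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL(nullptr, options);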