/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8 #include "include/core/SkExecutor.h"
9 #include "include/gpu/GrContextOptions.h"
10 #include "tools/flags/CommonFlags.h"
11
12 DEFINE_int(gpuThreads,
13 2,
14 "Create this many extra threads to assist with GPU work, "
15 "including software path rendering. Defaults to two.");
16
17 static DEFINE_bool(cachePathMasks, true,
18 "Allows path mask textures to be cached in GPU configs.");
19
20 static DEFINE_bool(noGS, false, "Disables support for geometry shaders.");
21
22 static DEFINE_bool(cc, false, "Allow coverage counting shortcuts to render paths?");
23
24 static DEFINE_string(pr, "",
25 "Set of enabled gpu path renderers. Defined as a list of: "
26 "[~]none [~]dashline [~]nvpr [~]ccpr [~]aahairline [~]aaconvex [~]aalinearizing "
27 "[~]small [~]tess] [~]all");
28
29 static DEFINE_bool(disableDriverCorrectnessWorkarounds, false,
30 "Disables all GPU driver correctness workarounds");
31
32 static DEFINE_bool(reduceOpListSplitting, false, "Improve opList sorting");
33 static DEFINE_bool(dontReduceOpListSplitting, false, "Allow more opList splitting");
34
get_named_pathrenderers_flags(const char * name)35 static GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
36 if (!strcmp(name, "none")) {
37 return GpuPathRenderers::kNone;
38 } else if (!strcmp(name, "dashline")) {
39 return GpuPathRenderers::kDashLine;
40 } else if (!strcmp(name, "nvpr")) {
41 return GpuPathRenderers::kStencilAndCover;
42 } else if (!strcmp(name, "ccpr")) {
43 return GpuPathRenderers::kCoverageCounting;
44 } else if (!strcmp(name, "aahairline")) {
45 return GpuPathRenderers::kAAHairline;
46 } else if (!strcmp(name, "aaconvex")) {
47 return GpuPathRenderers::kAAConvex;
48 } else if (!strcmp(name, "aalinearizing")) {
49 return GpuPathRenderers::kAALinearizing;
50 } else if (!strcmp(name, "small")) {
51 return GpuPathRenderers::kSmall;
52 } else if (!strcmp(name, "tess")) {
53 return GpuPathRenderers::kTessellating;
54 } else if (!strcmp(name, "all")) {
55 return GpuPathRenderers::kAll;
56 }
57 SK_ABORT(SkStringPrintf("error: unknown named path renderer \"%s\"\n", name).c_str());
58 }
59
collect_gpu_path_renderers_from_flags()60 static GpuPathRenderers collect_gpu_path_renderers_from_flags() {
61 if (FLAGS_pr.isEmpty()) {
62 return GpuPathRenderers::kAll;
63 }
64
65 GpuPathRenderers gpuPathRenderers = ('~' == FLAGS_pr[0][0])
66 ? GpuPathRenderers::kAll
67 : GpuPathRenderers::kNone;
68
69 for (int i = 0; i < FLAGS_pr.count(); ++i) {
70 const char* name = FLAGS_pr[i];
71 if (name[0] == '~') {
72 gpuPathRenderers &= ~get_named_pathrenderers_flags(&name[1]);
73 } else {
74 gpuPathRenderers |= get_named_pathrenderers_flags(name);
75 }
76 }
77 return gpuPathRenderers;
78 }
79
SetCtxOptionsFromCommonFlags(GrContextOptions * ctxOptions)80 void SetCtxOptionsFromCommonFlags(GrContextOptions* ctxOptions) {
81 static std::unique_ptr<SkExecutor> gGpuExecutor = (0 != FLAGS_gpuThreads)
82 ? SkExecutor::MakeFIFOThreadPool(FLAGS_gpuThreads)
83 : nullptr;
84
85 ctxOptions->fExecutor = gGpuExecutor.get();
86 ctxOptions->fDisableCoverageCountingPaths = !FLAGS_cc;
87 ctxOptions->fAllowPathMaskCaching = FLAGS_cachePathMasks;
88 ctxOptions->fSuppressGeometryShaders = FLAGS_noGS;
89 ctxOptions->fGpuPathRenderers = collect_gpu_path_renderers_from_flags();
90 ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;
91
92 if (FLAGS_reduceOpListSplitting) {
93 SkASSERT(!FLAGS_dontReduceOpListSplitting);
94 ctxOptions->fReduceOpListSplitting = GrContextOptions::Enable::kYes;
95 } else if (FLAGS_dontReduceOpListSplitting) {
96 ctxOptions->fReduceOpListSplitting = GrContextOptions::Enable::kNo;
97 }
98 }
99