/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/sksl/DSLCore.h"
#include "src/core/SkSafeMath.h"
#include "src/sksl/SkSLAnalysis.h"
#include "src/sksl/SkSLCompiler.h"
#include "src/sksl/SkSLContext.h"
#include "src/sksl/SkSLIntrinsicMap.h"
#include "src/sksl/SkSLProgramSettings.h"
#include "src/sksl/SkSLThreadContext.h"
#include "src/sksl/ir/SkSLFieldAccess.h"
#include "src/sksl/ir/SkSLFunctionCall.h"
#include "src/sksl/ir/SkSLFunctionDefinition.h"
#include "src/sksl/ir/SkSLInterfaceBlock.h"
#include "src/sksl/ir/SkSLReturnStatement.h"
#include "src/sksl/ir/SkSLVariableReference.h"
#include "src/sksl/transform/SkSLProgramWriter.h"

#include <algorithm>
#include <forward_list>
#include <vector>

namespace SkSL {
static void append_rtadjust_fixup_to_vertex_main(const Context& context,
                                                 const FunctionDeclaration& decl, Block& body) {
    using namespace SkSL::dsl;
    using SkSL::dsl::Swizzle;  // disambiguate from SkSL::Swizzle
    using OwnerKind = SkSL::FieldAccess::OwnerKind;

    // If this program uses RTAdjust...
    ThreadContext::RTAdjustData& rtAdjust = ThreadContext::RTAdjustState();
    if (rtAdjust.fVar || rtAdjust.fInterfaceBlock) {
        // ...append a line to the end of the function body which fixes up sk_Position.
        const Variable* skPerVertex = nullptr;
        if (const ProgramElement* perVertexDecl =
                    context.fIntrinsics->find(Compiler::PERVERTEX_NAME)) {
            SkASSERT(perVertexDecl->is<SkSL::InterfaceBlock>());
            skPerVertex = &perVertexDecl->as<SkSL::InterfaceBlock>().variable();
        }

        SkASSERT(skPerVertex);
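        // Helper lambdas which build up the pieces of the fixup expression below.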
        auto Ref = [](const Variable* var) -> std::unique_ptr<Expression> {
            return VariableReference::Make(/*line=*/-1, var);
        };
        auto Field = [&](const Variable* var, int idx) -> std::unique_ptr<Expression> {
            return FieldAccess::Make(context, Ref(var), idx, OwnerKind::kAnonymousInterfaceBlock);
        };
        auto Pos = [&]() -> DSLExpression {
            return DSLExpression(FieldAccess::Make(context, Ref(skPerVertex), /*fieldIndex=*/0,
                                                   OwnerKind::kAnonymousInterfaceBlock));
        };
        auto Adjust = [&]() -> DSLExpression {
            return DSLExpression(rtAdjust.fInterfaceBlock
                                         ? Field(rtAdjust.fInterfaceBlock, rtAdjust.fFieldIndex)
                                         : Ref(rtAdjust.fVar));
        };

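        // The statement below is equivalent to this GLSL:
        //
        //     sk_Position = float4(sk_Position.xy * rtAdjust.xz +
        //                          sk_Position.ww * rtAdjust.yw,
        //                          0,
        //                          sk_Position.w);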
        auto fixupStmt = DSLStatement(
                Pos() = Float4(Swizzle(Pos(), X, Y) * Swizzle(Adjust(), X, Z) +
                               Swizzle(Pos(), W, W) * Swizzle(Adjust(), Y, W),
                               0,
                               Pos().w())
        );

        body.children().push_back(fixupStmt.release());
    }
}

std::unique_ptr<FunctionDefinition> FunctionDefinition::Convert(const Context& context,
                                                                int line,
                                                                const FunctionDeclaration& function,
                                                                std::unique_ptr<Statement> body,
                                                                bool builtin) {
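    // The Finalizer makes a single pass over the function body. It coerces return values to the
    // function's return type, rejects misplaced break/continue statements and early returns from
    // a vertex main(), enforces the variable-slot limit, and records any intrinsics that the
    // function references so they can be copied into the program.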
    class Finalizer : public ProgramWriter {
    public:
        Finalizer(const Context& context, const FunctionDeclaration& function,
                  IntrinsicSet* referencedIntrinsics)
                : fContext(context)
                , fFunction(function)
                , fReferencedIntrinsics(referencedIntrinsics) {}

        ~Finalizer() override {
            SkASSERT(fBreakableLevel == 0);
            SkASSERT(fContinuableLevel == std::forward_list<int>{0});
        }

        void copyIntrinsicIfNeeded(const FunctionDeclaration& function) {
            if (const ProgramElement* found =
                        fContext.fIntrinsics->findAndInclude(function.description())) {
                const FunctionDefinition& original = found->as<FunctionDefinition>();

                // Sort the referenced intrinsics into a consistent order; otherwise our output
                // will become non-deterministic.
                std::vector<const FunctionDeclaration*> intrinsics(
                        original.referencedIntrinsics().begin(),
                        original.referencedIntrinsics().end());
                std::sort(intrinsics.begin(), intrinsics.end(),
                          [](const FunctionDeclaration* a, const FunctionDeclaration* b) {
                              if (a->isBuiltin() != b->isBuiltin()) {
                                  return a->isBuiltin() < b->isBuiltin();
                              }
                              if (a->fLine != b->fLine) {
                                  return a->fLine < b->fLine;
                              }
                              if (a->name() != b->name()) {
                                  return a->name() < b->name();
                              }
                              return a->description() < b->description();
                          });
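                // Recurse before sharing this intrinsic, so that its dependencies always appear
                // ahead of it in the shared-element list.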
                for (const FunctionDeclaration* f : intrinsics) {
                    this->copyIntrinsicIfNeeded(*f);
                }

                ThreadContext::SharedElements().push_back(found);
            }
        }

        bool functionReturnsValue() const {
            return !fFunction.returnType().isVoid();
        }

        bool visitExpression(Expression& expr) override {
            if (expr.is<FunctionCall>()) {
                const FunctionDeclaration& func = expr.as<FunctionCall>().function();
                if (func.isBuiltin()) {
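                    // The sign of dFdy depends on the render target's orientation, so using it
                    // means this program needs the RT-flip uniform.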
                    if (func.intrinsicKind() == k_dFdy_IntrinsicKind) {
                        ThreadContext::Inputs().fUseFlipRTUniform = true;
                    }
                    if (func.definition()) {
                        fReferencedIntrinsics->insert(&func);
                    }
                    if (!fContext.fConfig->fIsBuiltinCode && fContext.fIntrinsics) {
                        this->copyIntrinsicIfNeeded(func);
                    }
                }
            }
            return INHERITED::visitExpression(expr);
        }

        bool visitStatement(Statement& stmt) override {
            switch (stmt.kind()) {
                case Statement::Kind::kVarDeclaration: {
                    // We count the number of slots used, but don't consider the precision of the
                    // base type. In practice, this reflects what GPUs really do pretty well.
                    // (i.e., RelaxedPrecision math doesn't mean your variable takes less space.)
                    // We also don't attempt to reclaim slots at the end of a Block.
                    size_t prevSlotsUsed = fSlotsUsed;
                    fSlotsUsed = SkSafeMath::Add(
                            fSlotsUsed, stmt.as<VarDeclaration>().var().type().slotCount());
                    // To avoid overzealous error reporting, only trigger the error at the first
                    // place where the stack limit is exceeded.
                    if (prevSlotsUsed < kVariableSlotLimit && fSlotsUsed >= kVariableSlotLimit) {
                        fContext.fErrors->error(stmt.fLine, "variable '" +
                                                            stmt.as<VarDeclaration>().var().name() +
                                                            "' exceeds the stack size limit");
                    }
                    break;
                }
                case Statement::Kind::kReturn: {
                    // Early returns from a vertex main() function will bypass sk_Position
                    // normalization, so we report an error if we encounter one. If this becomes
                    // an issue, we can add normalization before each return statement instead.
                    if (fContext.fConfig->fKind == ProgramKind::kVertex && fFunction.isMain()) {
                        fContext.fErrors->error(
                                stmt.fLine,
                                "early returns from vertex programs are not supported");
                    }

                    // Verify that the return statement matches the function's return type.
                    ReturnStatement& returnStmt = stmt.as<ReturnStatement>();
                    if (returnStmt.expression()) {
                        if (this->functionReturnsValue()) {
                            // Coerce return expression to the function's return type.
                            returnStmt.setExpression(fFunction.returnType().coerceExpression(
                                    std::move(returnStmt.expression()), fContext));
                        } else {
                            // Returning something from a function with a void return type.
                            returnStmt.setExpression(nullptr);
                            fContext.fErrors->error(returnStmt.fLine,
                                                    "may not return a value from a void function");
                        }
                    } else {
                        if (this->functionReturnsValue()) {
                            // Returning nothing from a function with a non-void return type.
                            fContext.fErrors->error(returnStmt.fLine,
                                                    "expected function to return '" +
                                                    fFunction.returnType().displayName() + "'");
                        }
                    }
                    break;
                }
                case Statement::Kind::kDo:
                case Statement::Kind::kFor: {
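                    // Loops are both breakable and continuable.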
                    ++fBreakableLevel;
                    ++fContinuableLevel.front();
                    bool result = INHERITED::visitStatement(stmt);
                    --fContinuableLevel.front();
                    --fBreakableLevel;
                    return result;
                }
                case Statement::Kind::kSwitch: {
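                    // A switch is breakable, but not continuable; push a fresh zero level so
                    // that a `continue` inside the switch body is detected and reported.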
                    ++fBreakableLevel;
                    fContinuableLevel.push_front(0);
                    bool result = INHERITED::visitStatement(stmt);
                    fContinuableLevel.pop_front();
                    --fBreakableLevel;
                    return result;
                }
                case Statement::Kind::kBreak:
                    if (fBreakableLevel == 0) {
                        fContext.fErrors->error(stmt.fLine,
                                                "break statement must be inside a loop or switch");
                    }
                    break;
                case Statement::Kind::kContinue:
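                    // Inside a switch, the innermost continuable level is the switch's own zero
                    // entry, even when the switch sits inside a loop, so this is rejected:
                    //
                    //     for (...) { switch (x) { default: continue; } }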
                    if (fContinuableLevel.front() == 0) {
                        if (std::any_of(fContinuableLevel.begin(),
                                        fContinuableLevel.end(),
                                        [](int level) { return level > 0; })) {
                            fContext.fErrors->error(stmt.fLine,
                                    "continue statement cannot be used in a switch");
                        } else {
                            fContext.fErrors->error(stmt.fLine,
                                    "continue statement must be inside a loop");
                        }
                    }
                    break;
                default:
                    break;
            }
            return INHERITED::visitStatement(stmt);
        }

    private:
        const Context& fContext;
        const FunctionDeclaration& fFunction;
        // which intrinsics have we encountered in this function
        IntrinsicSet* fReferencedIntrinsics;
        // how deeply nested we are in breakable constructs (for, do, switch).
        int fBreakableLevel = 0;
        // number of slots consumed by all variables declared in the function
        size_t fSlotsUsed = 0;
        // how deeply nested we are in continuable constructs (for, do).
        // We keep a stack (via a forward_list) in order to disallow continue inside of switch.
        std::forward_list<int> fContinuableLevel{0};

        using INHERITED = ProgramWriter;
    };

    IntrinsicSet referencedIntrinsics;
    Finalizer(context, function, &referencedIntrinsics).visitStatement(*body);
    if (function.isMain() && context.fConfig->fKind == ProgramKind::kVertex) {
        append_rtadjust_fixup_to_vertex_main(context, function, body->as<Block>());
    }

    if (Analysis::CanExitWithoutReturningValue(function, *body)) {
        context.fErrors->error(function.fLine, "function '" + function.name() +
                                               "' can exit without returning a value");
    }

    return std::make_unique<FunctionDefinition>(line, &function, builtin, std::move(body),
                                                std::move(referencedIntrinsics));
}

}  // namespace SkSL