//
// Copyright (C) 2017-2018 Google, Inc.
// Copyright (C) 2017 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#include "hlslParseHelper.h"
#include "hlslScanContext.h"
#include "hlslGrammar.h"
#include "hlslAttributes.h"

#include "../Include/Common.h"
#include "../MachineIndependent/Scan.h"
#include "../MachineIndependent/preprocessor/PpContext.h"

#include "../OSDependent/osinclude.h"

#include <algorithm>
#include <functional>
#include <cctype>
#include <array>
#include <set>

namespace glslang {
HlslParseContext::HlslParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins,
                                   int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
                                   TInfoSink& infoSink,
                                   const TString sourceEntryPointName,
                                   bool forwardCompatible, EShMessages messages) :
    TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language, infoSink,
                      forwardCompatible, messages, &sourceEntryPointName),
    annotationNestingLevel(0),
    inputPatch(nullptr),
    nextInLocation(0), nextOutLocation(0),
    entryPointFunction(nullptr),
    entryPointFunctionBody(nullptr),
    gsStreamOutput(nullptr),
    clipDistanceOutput(nullptr),
    cullDistanceOutput(nullptr),
    clipDistanceInput(nullptr),
    cullDistanceInput(nullptr),
    parsingEntrypointParameters(false)
{
    globalUniformDefaults.clear();
    globalUniformDefaults.layoutMatrix = ElmRowMajor;
    globalUniformDefaults.layoutPacking = ElpStd140;

    globalBufferDefaults.clear();
    globalBufferDefaults.layoutMatrix = ElmRowMajor;
    globalBufferDefaults.layoutPacking = ElpStd430;
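    // Note: ElmRowMajor above is in the glslang/SPIR-V sense; HLSL's row/column
    // sense is reversed relative to SPIR-V (see the note in handlePragma), so
    // these defaults match HLSL's default column_major packing.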

    globalInputDefaults.clear();
    globalOutputDefaults.clear();

    clipSemanticNSizeIn.fill(0);
    cullSemanticNSizeIn.fill(0);
    clipSemanticNSizeOut.fill(0);
    cullSemanticNSizeOut.fill(0);

    // "Shaders in the transform
    // feedback capturing mode have an initial global default of
    //     layout(xfb_buffer = 0) out;"
    if (language == EShLangVertex ||
        language == EShLangTessControl ||
        language == EShLangTessEvaluation ||
        language == EShLangGeometry)
        globalOutputDefaults.layoutXfbBuffer = 0;

    if (language == EShLangGeometry)
        globalOutputDefaults.layoutStream = 0;
}

HlslParseContext::~HlslParseContext()
{
}

void HlslParseContext::initializeExtensionBehavior()
{
    TParseContextBase::initializeExtensionBehavior();

    // HLSL allows #line by default.
    extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhEnable;
}

void HlslParseContext::setLimits(const TBuiltInResource& r)
{
    resources = r;
    intermediate.setLimits(resources);
}

//
// Parse an array of strings using the parser in HlslRules.
//
// Returns true for successful acceptance of the shader, false if any errors.
//
bool HlslParseContext::parseShaderStrings(TPpContext& ppContext, TInputScanner& input, bool versionWillBeError)
{
    currentScanner = &input;
    ppContext.setInput(input, versionWillBeError);

    HlslScanContext scanContext(*this, ppContext);
    HlslGrammar grammar(scanContext, *this);
    if (!grammar.parse()) {
        // Print a message formatted such that if you click on the message it will take you right to
        // the line through most UIs.
        const glslang::TSourceLoc& sourceLoc = input.getSourceLoc();
        infoSink.info << sourceLoc.getFilenameStr() << "(" << sourceLoc.line << "): error at column "
                      << sourceLoc.column << ", HLSL parsing failed.\n";
        ++numErrors;
        return false;
    }

    finish();

    return numErrors == 0;
}

//
// Return true if this l-value node should be converted in some manner.
// For instance: turning a load aggregate into a store in an l-value.
//
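// For example, an HLSL write "rwTex[coord] = value" initially parses the
// left-hand side into an image load; handleLvalue() must rewrite that load
// into an image store sequence.
//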
bool HlslParseContext::shouldConvertLValue(const TIntermNode* node) const
{
    if (node == nullptr || node->getAsTyped() == nullptr)
        return false;

    const TIntermAggregate* lhsAsAggregate = node->getAsAggregate();
    const TIntermBinary* lhsAsBinary = node->getAsBinaryNode();

    // If it's a swizzled/indexed aggregate, look at the left node instead.
    if (lhsAsBinary != nullptr &&
        (lhsAsBinary->getOp() == EOpVectorSwizzle || lhsAsBinary->getOp() == EOpIndexDirect))
        lhsAsAggregate = lhsAsBinary->getLeft()->getAsAggregate();
    if (lhsAsAggregate != nullptr && lhsAsAggregate->getOp() == EOpImageLoad)
        return true;

    return false;
}

void HlslParseContext::growGlobalUniformBlock(const TSourceLoc& loc, TType& memberType, const TString& memberName,
                                              TTypeList* newTypeList)
{
    newTypeList = nullptr;
    correctUniform(memberType.getQualifier());
    if (memberType.isStruct()) {
        auto it = ioTypeMap.find(memberType.getStruct());
        if (it != ioTypeMap.end() && it->second.uniform)
            newTypeList = it->second.uniform;
    }
    TParseContextBase::growGlobalUniformBlock(loc, memberType, memberName, newTypeList);
}

//
// Return a TLayoutFormat corresponding to the given texture type.
//
TLayoutFormat HlslParseContext::getLayoutFromTxType(const TSourceLoc& loc, const TType& txType)
{
    if (txType.isStruct()) {
        // TODO: implement.
        error(loc, "unimplemented: structure type in image or buffer", "", "");
        return ElfNone;
    }

    const int components = txType.getVectorSize();
    const TBasicType txBasicType = txType.getBasicType();

    const auto selectFormat = [this, &components](TLayoutFormat v1, TLayoutFormat v2, TLayoutFormat v4) -> TLayoutFormat {
        if (intermediate.getNoStorageFormat())
            return ElfNone;

        return components == 1 ? v1 :
               components == 2 ? v2 : v4;
    };

    switch (txBasicType) {
    case EbtFloat: return selectFormat(ElfR32f,  ElfRg32f,  ElfRgba32f);
    case EbtInt:   return selectFormat(ElfR32i,  ElfRg32i,  ElfRgba32i);
    case EbtUint:  return selectFormat(ElfR32ui, ElfRg32ui, ElfRgba32ui);
    default:
        error(loc, "unknown basic type in image format", "", "");
        return ElfNone;
    }
}

//
// Both test, and if necessary spit out an error, to see if the node is really
// an l-value that can be operated on this way.
//
// Returns true if there was an error.
//
bool HlslParseContext::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
    if (shouldConvertLValue(node)) {
        // if we're writing to a texture, it must be an RW form.

        TIntermAggregate* lhsAsAggregate = node->getAsAggregate();
        TIntermTyped* object = lhsAsAggregate->getSequence()[0]->getAsTyped();

        if (!object->getType().getSampler().isImage()) {
            error(loc, "operator[] on a non-RW texture must be an r-value", "", "");
            return true;
        }
    }

    // We tolerate samplers as l-values, even though they are nominally
    // illegal, because we expect a later optimization to eliminate them.
    if (node->getType().getBasicType() == EbtSampler) {
        intermediate.setNeedsLegalization();
        return false;
    }

    // Let the base class check errors
    return TParseContextBase::lValueErrorCheck(loc, op, node);
}

//
// This function handles l-value conversions and verifications. It uses, but is not synonymous
// with, lValueErrorCheck. That function accepts an l-value directly, while this one must be
// given the surrounding tree - e.g., with an assignment, so we can convert the assign into a
// series of other image operations.
//
// Most things are passed through unmodified, except for error checking.
//
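// For example, "rwTex[coord] += 1" arrives as an add-assign whose left child
// is the image load built for rwTex[coord]; the cases below rewrite it into a
// load/modify/store sequence that still yields the stored value.
//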
TIntermTyped* HlslParseContext::handleLvalue(const TSourceLoc& loc, const char* op, TIntermTyped*& node)
{
    if (node == nullptr)
        return nullptr;

    TIntermBinary* nodeAsBinary = node->getAsBinaryNode();
    TIntermUnary* nodeAsUnary = node->getAsUnaryNode();
    TIntermAggregate* sequence = nullptr;

    TIntermTyped* lhs = nodeAsUnary  ? nodeAsUnary->getOperand() :
                        nodeAsBinary ? nodeAsBinary->getLeft() :
                        nullptr;

    // Early bail out if there is no conversion to apply
    if (!shouldConvertLValue(lhs)) {
        if (lhs != nullptr)
            if (lValueErrorCheck(loc, op, lhs))
                return nullptr;
        return node;
    }

    // *** If we get here, we're going to apply some conversion to an l-value.

    // Helper to create a load.
    const auto makeLoad = [&](TIntermSymbol* rhsTmp, TIntermTyped* object, TIntermTyped* coord, const TType& derefType) {
        TIntermAggregate* loadOp = new TIntermAggregate(EOpImageLoad);
        loadOp->setLoc(loc);
        loadOp->getSequence().push_back(object);
        loadOp->getSequence().push_back(intermediate.addSymbol(*coord->getAsSymbolNode()));
        loadOp->setType(derefType);

        sequence = intermediate.growAggregate(sequence,
                                              intermediate.addAssign(EOpAssign, rhsTmp, loadOp, loc),
                                              loc);
    };

    // Helper to create a store.
    const auto makeStore = [&](TIntermTyped* object, TIntermTyped* coord, TIntermSymbol* rhsTmp) {
        TIntermAggregate* storeOp = new TIntermAggregate(EOpImageStore);
        storeOp->getSequence().push_back(object);
        storeOp->getSequence().push_back(coord);
        storeOp->getSequence().push_back(intermediate.addSymbol(*rhsTmp));
        storeOp->setLoc(loc);
        storeOp->setType(TType(EbtVoid));

        sequence = intermediate.growAggregate(sequence, storeOp);
    };

    // Helper to create an assign.
    const auto makeBinary = [&](TOperator op, TIntermTyped* lhs, TIntermTyped* rhs) {
        sequence = intermediate.growAggregate(sequence,
                                              intermediate.addBinaryNode(op, lhs, rhs, loc, lhs->getType()),
                                              loc);
    };

    // Helper to complete sequence by adding trailing variable, so we evaluate to the right value.
    const auto finishSequence = [&](TIntermSymbol* rhsTmp, const TType& derefType) -> TIntermAggregate* {
        // Add a trailing use of the temp, so the sequence returns the proper value.
        sequence = intermediate.growAggregate(sequence, intermediate.addSymbol(*rhsTmp));
        sequence->setOperator(EOpSequence);
        sequence->setLoc(loc);
        sequence->setType(derefType);

        return sequence;
    };

    // Helper to add unary op
    const auto makeUnary = [&](TOperator op, TIntermSymbol* rhsTmp) {
        sequence = intermediate.growAggregate(sequence,
                                              intermediate.addUnaryNode(op, intermediate.addSymbol(*rhsTmp), loc,
                                                                        rhsTmp->getType()),
                                              loc);
    };

    // Return true if swizzle or index writes all components of the given variable.
    const auto writesAllComponents = [&](TIntermSymbol* var, TIntermBinary* swizzle) -> bool {
        if (swizzle == nullptr) // not a swizzle or index
            return true;

        // Track which components are being set.
        std::array<bool, 4> compIsSet;
        compIsSet.fill(false);

        const TIntermConstantUnion* asConst = swizzle->getRight()->getAsConstantUnion();
        const TIntermAggregate* asAggregate = swizzle->getRight()->getAsAggregate();

        // This could be either a direct index, or a swizzle.
        if (asConst) {
            compIsSet[asConst->getConstArray()[0].getIConst()] = true;
        } else if (asAggregate) {
            const TIntermSequence& seq = asAggregate->getSequence();
            for (int comp = 0; comp < int(seq.size()); ++comp)
                compIsSet[seq[comp]->getAsConstantUnion()->getConstArray()[0].getIConst()] = true;
        } else {
            assert(0);
        }

        // Return true if all components are being set by the index or swizzle
        return std::all_of(compIsSet.begin(), compIsSet.begin() + var->getType().getVectorSize(),
                           [](bool isSet) { return isSet; } );
    };

    // Create swizzle matching input swizzle
    const auto addSwizzle = [&](TIntermSymbol* var, TIntermBinary* swizzle) -> TIntermTyped* {
        if (swizzle)
            return intermediate.addBinaryNode(swizzle->getOp(), var, swizzle->getRight(), loc, swizzle->getType());
        else
            return var;
    };

    TIntermBinary* lhsAsBinary = lhs->getAsBinaryNode();
    TIntermAggregate* lhsAsAggregate = lhs->getAsAggregate();
    bool lhsIsSwizzle = false;

    // If it's a swizzled L-value, remember the swizzle, and use the LHS.
    if (lhsAsBinary != nullptr && (lhsAsBinary->getOp() == EOpVectorSwizzle || lhsAsBinary->getOp() == EOpIndexDirect)) {
        lhsAsAggregate = lhsAsBinary->getLeft()->getAsAggregate();
        lhsIsSwizzle = true;
    }

    TIntermTyped* object = lhsAsAggregate->getSequence()[0]->getAsTyped();
    TIntermTyped* coord = lhsAsAggregate->getSequence()[1]->getAsTyped();

    const TSampler& texSampler = object->getType().getSampler();

    TType objDerefType;
    getTextureReturnType(texSampler, objDerefType);

    if (nodeAsBinary) {
        TIntermTyped* rhs = nodeAsBinary->getRight();
        const TOperator assignOp = nodeAsBinary->getOp();

        bool isModifyOp = false;

        switch (assignOp) {
        case EOpAddAssign:
        case EOpSubAssign:
        case EOpMulAssign:
        case EOpVectorTimesMatrixAssign:
        case EOpVectorTimesScalarAssign:
        case EOpMatrixTimesScalarAssign:
        case EOpMatrixTimesMatrixAssign:
        case EOpDivAssign:
        case EOpModAssign:
        case EOpAndAssign:
        case EOpInclusiveOrAssign:
        case EOpExclusiveOrAssign:
        case EOpLeftShiftAssign:
        case EOpRightShiftAssign:
            isModifyOp = true;
            // fall through...
        case EOpAssign:
            {
                // Since this is an lvalue, we'll convert an image load to a sequence like this
                // (to still provide the value):
                //   OpSequence
                //      OpImageStore(object, lhs, rhs)
                //      rhs
                // But if it's not a simple symbol RHS (say, a fn call), we don't want to duplicate the RHS,
                // so we'll convert instead to this:
                //   OpSequence
                //      rhsTmp = rhs
                //      OpImageStore(object, coord, rhsTmp)
                //      rhsTmp
                // If this is a read-modify-write op, like +=, we issue:
                //   OpSequence
                //      coordtmp = load's param1
                //      rhsTmp = OpImageLoad(object, coordTmp)
                //      rhsTmp op= rhs
                //      OpImageStore(object, coordTmp, rhsTmp)
                //      rhsTmp
                //
                // If the lvalue is swizzled, we apply that when writing the temp variable, like so:
                //    ...
                //    rhsTmp.some_swizzle = ...
                // For partial writes, an error is generated.

                TIntermSymbol* rhsTmp = rhs->getAsSymbolNode();
                TIntermTyped* coordTmp = coord;

                if (rhsTmp == nullptr || isModifyOp || lhsIsSwizzle) {
                    rhsTmp = makeInternalVariableNode(loc, "storeTemp", objDerefType);

                    // Partial updates not yet supported
                    if (!writesAllComponents(rhsTmp, lhsAsBinary)) {
                        error(loc, "unimplemented: partial image updates", "", "");
                    }

                    // Assign storeTemp = rhs
                    if (isModifyOp) {
                        // We have to make a temp var for the coordinate, to avoid evaluating it twice.
                        coordTmp = makeInternalVariableNode(loc, "coordTemp", coord->getType());
                        makeBinary(EOpAssign, coordTmp, coord);           // coordtmp = load[param1]
                        makeLoad(rhsTmp, object, coordTmp, objDerefType); // rhsTmp = OpImageLoad(object, coordTmp)
                    }

                    // rhsTmp op= rhs.
                    makeBinary(assignOp, addSwizzle(intermediate.addSymbol(*rhsTmp), lhsAsBinary), rhs);
                }

                makeStore(object, coordTmp, rhsTmp);         // add a store
                return finishSequence(rhsTmp, objDerefType); // return rhsTmp from sequence
            }

        default:
            break;
        }
    }

    if (nodeAsUnary) {
        const TOperator assignOp = nodeAsUnary->getOp();

        switch (assignOp) {
        case EOpPreIncrement:
        case EOpPreDecrement:
            {
                // We turn this into:
                //   OpSequence
                //      coordtmp = load's param1
                //      rhsTmp = OpImageLoad(object, coordTmp)
                //      rhsTmp op
                //      OpImageStore(object, coordTmp, rhsTmp)
                //      rhsTmp

                TIntermSymbol* rhsTmp = makeInternalVariableNode(loc, "storeTemp", objDerefType);
                TIntermTyped* coordTmp = makeInternalVariableNode(loc, "coordTemp", coord->getType());

                makeBinary(EOpAssign, coordTmp, coord);           // coordtmp = load[param1]
                makeLoad(rhsTmp, object, coordTmp, objDerefType); // rhsTmp = OpImageLoad(object, coordTmp)
                makeUnary(assignOp, rhsTmp);                      // op rhsTmp
                makeStore(object, coordTmp, rhsTmp);              // OpImageStore(object, coordTmp, rhsTmp)
                return finishSequence(rhsTmp, objDerefType);      // return rhsTmp from sequence
            }

        case EOpPostIncrement:
        case EOpPostDecrement:
            {
                // We turn this into:
                //   OpSequence
                //      coordtmp = load's param1
                //      rhsTmp1 = OpImageLoad(object, coordTmp)
                //      rhsTmp2 = rhsTmp1
                //      rhsTmp2 op
                //      OpImageStore(object, coordTmp, rhsTmp2)
                //      rhsTmp1 (pre-op value)
                TIntermSymbol* rhsTmp1 = makeInternalVariableNode(loc, "storeTempPre",  objDerefType);
                TIntermSymbol* rhsTmp2 = makeInternalVariableNode(loc, "storeTempPost", objDerefType);
                TIntermTyped* coordTmp = makeInternalVariableNode(loc, "coordTemp", coord->getType());

                makeBinary(EOpAssign, coordTmp, coord);            // coordtmp = load[param1]
                makeLoad(rhsTmp1, object, coordTmp, objDerefType); // rhsTmp1 = OpImageLoad(object, coordTmp)
                makeBinary(EOpAssign, rhsTmp2, rhsTmp1);           // rhsTmp2 = rhsTmp1
                makeUnary(assignOp, rhsTmp2);                      // rhsTmp op
                makeStore(object, coordTmp, rhsTmp2);              // OpImageStore(object, coordTmp, rhsTmp2)
                return finishSequence(rhsTmp1, objDerefType);      // return rhsTmp1 (the pre-op value) from sequence
            }

        default:
            break;
        }
    }

    if (lhs)
        if (lValueErrorCheck(loc, op, lhs))
            return nullptr;

    return node;
}

void HlslParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>& tokens)
{
    if (pragmaCallback)
        pragmaCallback(loc.line, tokens);

    if (tokens.size() == 0)
        return;

    // These pragmas are case insensitive in HLSL, so we'll compare in lower case.
    TVector<TString> lowerTokens = tokens;

    for (auto it = lowerTokens.begin(); it != lowerTokens.end(); ++it)
        std::transform(it->begin(), it->end(), it->begin(), ::tolower);

    // Handle pack_matrix
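    // e.g., "#pragma pack_matrix(row_major)" or "#pragma pack_matrix(column_major)"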
    if (tokens.size() == 4 && lowerTokens[0] == "pack_matrix" && tokens[1] == "(" && tokens[3] == ")") {
        // Note that HLSL semantic order is Mrc, not Mcr like SPIR-V, so we reverse the sense.
        // Row major becomes column major and vice versa.

        if (lowerTokens[2] == "row_major") {
            globalUniformDefaults.layoutMatrix = globalBufferDefaults.layoutMatrix = ElmColumnMajor;
        } else if (lowerTokens[2] == "column_major") {
            globalUniformDefaults.layoutMatrix = globalBufferDefaults.layoutMatrix = ElmRowMajor;
        } else {
            // unknown majorness strings are treated as (HLSL column major)==(SPIR-V row major)
            warn(loc, "unknown pack_matrix pragma value", tokens[2].c_str(), "");
            globalUniformDefaults.layoutMatrix = globalBufferDefaults.layoutMatrix = ElmRowMajor;
        }
        return;
    }

    // Handle once
    if (lowerTokens[0] == "once") {
        warn(loc, "not implemented", "#pragma once", "");
        return;
    }
}

//
// Look at a '.' matrix selector string and change it into components
// for a matrix. There are two types:
//
//   _21    second row, first column (one based)
//   _m21   third row, second column (zero based)
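//
// Selectors may be chained, e.g., "._11_12" selects two components.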
//
// Returns true if there is no error.
//
bool HlslParseContext::parseMatrixSwizzleSelector(const TSourceLoc& loc, const TString& fields, int cols, int rows,
                                                  TSwizzleSelectors<TMatrixSelector>& components)
{
    int startPos[MaxSwizzleSelectors];
    int numComps = 0;
    TString compString = fields;

    // Find where each component starts,
    // recording the first character position after the '_'.
    for (size_t c = 0; c < compString.size(); ++c) {
        if (compString[c] == '_') {
            if (numComps >= MaxSwizzleSelectors) {
                error(loc, "matrix component swizzle has too many components", compString.c_str(), "");
                return false;
            }
            if (c > compString.size() - 3 ||
                ((compString[c+1] == 'm' || compString[c+1] == 'M') && c > compString.size() - 4)) {
                error(loc, "matrix component swizzle missing", compString.c_str(), "");
                return false;
            }
            startPos[numComps++] = (int)c + 1;
        }
    }

    // Process each component
    for (int i = 0; i < numComps; ++i) {
        int pos = startPos[i];
        int bias = -1;
        if (compString[pos] == 'm' || compString[pos] == 'M') {
            bias = 0;
            ++pos;
        }
        TMatrixSelector comp;
        comp.coord1 = compString[pos+0] - '0' + bias;
        comp.coord2 = compString[pos+1] - '0' + bias;
        if (comp.coord1 < 0 || comp.coord1 >= cols) {
            error(loc, "matrix row component out of range", compString.c_str(), "");
            return false;
        }
        if (comp.coord2 < 0 || comp.coord2 >= rows) {
            error(loc, "matrix column component out of range", compString.c_str(), "");
            return false;
        }
        components.push_back(comp);
    }

    return true;
}

// If the 'comps' express a column of a matrix,
// return the column. Column means the first coords all match.
//
// Otherwise, return -1.
//
int HlslParseContext::getMatrixComponentsColumn(int rows, const TSwizzleSelectors<TMatrixSelector>& selector)
{
    int col = -1;

    // right number of comps?
    if (selector.size() != rows)
        return -1;

    // all comps in the same column?
    // rows in order?
    col = selector[0].coord1;
    for (int i = 0; i < rows; ++i) {
        if (col != selector[i].coord1)
            return -1;
        if (i != selector[i].coord2)
            return -1;
    }

    return col;
}

//
// Handle seeing a variable identifier in the grammar.
//
TIntermTyped* HlslParseContext::handleVariable(const TSourceLoc& loc, const TString* string)
{
    int thisDepth;
    TSymbol* symbol = symbolTable.find(*string, thisDepth);
    if (symbol && symbol->getAsVariable() && symbol->getAsVariable()->isUserType()) {
        error(loc, "expected symbol, not user-defined type", string->c_str(), "");
        return nullptr;
    }

    const TVariable* variable = nullptr;
    const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr;
    TIntermTyped* node = nullptr;
    if (anon) {
        // It was a member of an anonymous container, which could be a 'this' structure.

        // Create a subtree for its dereference.
        if (thisDepth > 0) {
            variable = getImplicitThis(thisDepth);
            if (variable == nullptr)
                error(loc, "cannot access member variables (static member function?)", "this", "");
        }
        if (variable == nullptr)
            variable = anon->getAnonContainer().getAsVariable();

        TIntermTyped* container = intermediate.addSymbol(*variable, loc);
        TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc);
        node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc);

        node->setType(*(*variable->getType().getStruct())[anon->getMemberNumber()].type);
        if (node->getType().hiddenMember())
            error(loc, "member of nameless block was not redeclared", string->c_str(), "");
    } else {
        // Not a member of an anonymous container.

        // The symbol table search was done in the lexical phase.
        // See if it was a variable.
        variable = symbol ? symbol->getAsVariable() : nullptr;
        if (variable) {
            if ((variable->getType().getBasicType() == EbtBlock ||
                 variable->getType().getBasicType() == EbtStruct) && variable->getType().getStruct() == nullptr) {
                error(loc, "cannot be used (maybe an instance name is needed)", string->c_str(), "");
                variable = nullptr;
            }
        } else {
            if (symbol)
                error(loc, "variable name expected", string->c_str(), "");
        }

        // Recovery, if it wasn't found or was not a variable.
        if (variable == nullptr) {
            error(loc, "unknown variable", string->c_str(), "");
            variable = new TVariable(string, TType(EbtVoid));
        }

        if (variable->getType().getQualifier().isFrontEndConstant())
            node = intermediate.addConstantUnion(variable->getConstArray(), variable->getType(), loc);
        else
            node = intermediate.addSymbol(*variable, loc);
    }

    if (variable->getType().getQualifier().isIo())
        intermediate.addIoAccessed(*string);

    return node;
}

//
// Handle operator[] on any objects it applies to. Currently:
//    Textures
//    Buffers
TIntermTyped* HlslParseContext::handleBracketOperator(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index)
{
    // handle r-value operator[] on textures and images. l-values will be processed later.
    if (base->getType().getBasicType() == EbtSampler && !base->isArray()) {
        const TSampler& sampler = base->getType().getSampler();
        if (sampler.isImage() || sampler.isTexture()) {
            if (! mipsOperatorMipArg.empty() && mipsOperatorMipArg.back().mipLevel == nullptr) {
                // The first operator[] to a .mips[] sequence is the mip level. We'll remember it.
                mipsOperatorMipArg.back().mipLevel = index;
                return base; // next [] index is to the same base.
            } else {
                TIntermAggregate* load = new TIntermAggregate(sampler.isImage() ? EOpImageLoad : EOpTextureFetch);

                TType sampReturnType;
                getTextureReturnType(sampler, sampReturnType);

                load->setType(sampReturnType);
                load->setLoc(loc);
                load->getSequence().push_back(base);
                load->getSequence().push_back(index);

                // Textures need a MIP. If we saw one go by, use it. Otherwise, use zero.
                if (sampler.isTexture()) {
                    if (! mipsOperatorMipArg.empty()) {
                        load->getSequence().push_back(mipsOperatorMipArg.back().mipLevel);
                        mipsOperatorMipArg.pop_back();
                    } else {
                        load->getSequence().push_back(intermediate.addConstantUnion(0, loc, true));
                    }
                }

                return load;
            }
        }
    }

    // Handle operator[] on structured buffers: this indexes into the array element of the buffer.
    // indexStructBufferContent returns nullptr if it isn't a structuredbuffer (SSBO).
    TIntermTyped* sbArray = indexStructBufferContent(loc, base);
    if (sbArray != nullptr) {
        // Now we'll apply the [] index to that array
        const TOperator idxOp = (index->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;

        TIntermTyped* element = intermediate.addIndex(idxOp, sbArray, index, loc);
        const TType derefType(sbArray->getType(), 0);
        element->setType(derefType);
        return element;
    }

    return nullptr;
}

//
// Cast index value to a uint if it isn't already (for operator[], load indexes, etc)
TIntermTyped* HlslParseContext::makeIntegerIndex(TIntermTyped* index)
{
    const TBasicType indexBasicType = index->getType().getBasicType();
    const int vecSize = index->getType().getVectorSize();

    // We can use int types directly as the index
    if (indexBasicType == EbtInt || indexBasicType == EbtUint ||
        indexBasicType == EbtInt64 || indexBasicType == EbtUint64)
        return index;

    // Cast index to unsigned integer if it isn't one.
    return intermediate.addConversion(EOpConstructUint, TType(EbtUint, EvqTemporary, vecSize), index);
}

//
// Handle seeing a base[index] dereference in the grammar.
//
TIntermTyped* HlslParseContext::handleBracketDereference(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index)
{
    index = makeIntegerIndex(index);

    if (index == nullptr) {
        error(loc, " unknown index type ", "", "");
        return nullptr;
    }

    TIntermTyped* result = handleBracketOperator(loc, base, index);

    if (result != nullptr)
        return result; // it was handled as an operator[]

    bool flattened = false;
    int indexValue = 0;
    if (index->getQualifier().isFrontEndConstant())
        indexValue = index->getAsConstantUnion()->getConstArray()[0].getIConst();

    variableCheck(base);
    if (! base->isArray() && ! base->isMatrix() && ! base->isVector()) {
        if (base->getAsSymbolNode())
            error(loc, " left of '[' is not of type array, matrix, or vector ",
                  base->getAsSymbolNode()->getName().c_str(), "");
        else
            error(loc, " left of '[' is not of type array, matrix, or vector ", "expression", "");
    } else if (base->getType().getQualifier().isFrontEndConstant() &&
               index->getQualifier().isFrontEndConstant()) {
        // both base and index are front-end constants
        checkIndex(loc, base->getType(), indexValue);
        return intermediate.foldDereference(base, indexValue, loc);
    } else {
        // at least one of base and index is variable...

        if (index->getQualifier().isFrontEndConstant())
            checkIndex(loc, base->getType(), indexValue);

        if (base->getType().isScalarOrVec1())
            result = base;
        else if (base->getAsSymbolNode() && wasFlattened(base)) {
            if (index->getQualifier().storage != EvqConst)
                error(loc, "Invalid variable index to flattened array", base->getAsSymbolNode()->getName().c_str(), "");

            result = flattenAccess(base, indexValue);
            flattened = (result != base);
        } else {
            if (index->getQualifier().isFrontEndConstant()) {
                if (base->getType().isUnsizedArray())
                    base->getWritableType().updateImplicitArraySize(indexValue + 1);
                else
                    checkIndex(loc, base->getType(), indexValue);
                result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
            } else
                result = intermediate.addIndex(EOpIndexIndirect, base, index, loc);
        }
    }

    if (result == nullptr) {
        // Insert dummy error-recovery result
        result = intermediate.addConstantUnion(0.0, EbtFloat, loc);
    } else {
        // If the array reference was flattened, it has the correct type. E.g., if it was
        // a uniform array, it was flattened INTO a set of scalar uniforms, not scalar temps.
        // In that case, we preserve the qualifiers.
        if (!flattened) {
            // Insert valid dereferenced result
            TType newType(base->getType(), 0); // dereferenced type
            if (base->getType().getQualifier().storage == EvqConst && index->getQualifier().storage == EvqConst)
                newType.getQualifier().storage = EvqConst;
            else
                newType.getQualifier().storage = EvqTemporary;
            result->setType(newType);
        }
    }

    return result;
}

// Handle seeing a binary node with a math operation.
TIntermTyped* HlslParseContext::handleBinaryMath(const TSourceLoc& loc, const char* str, TOperator op,
                                                 TIntermTyped* left, TIntermTyped* right)
{
    TIntermTyped* result = intermediate.addBinaryMath(op, left, right, loc);
    if (result == nullptr)
        binaryOpError(loc, str, left->getCompleteString(), right->getCompleteString());

    return result;
}

// Handle seeing a unary node with a math operation.
TIntermTyped* HlslParseContext::handleUnaryMath(const TSourceLoc& loc, const char* str, TOperator op,
                                                TIntermTyped* childNode)
{
    TIntermTyped* result = intermediate.addUnaryMath(op, childNode, loc);

    if (result)
        return result;
    else
        unaryOpError(loc, str, childNode->getCompleteString());

    return childNode;
}

//
// Return true if the name is a struct buffer method
//
bool HlslParseContext::isStructBufferMethod(const TString& name) const
{
    return
        name == "GetDimensions" ||
        name == "Load" ||
        name == "Load2" ||
        name == "Load3" ||
        name == "Load4" ||
        name == "Store" ||
        name == "Store2" ||
        name == "Store3" ||
        name == "Store4" ||
        name == "InterlockedAdd" ||
        name == "InterlockedAnd" ||
        name == "InterlockedCompareExchange" ||
        name == "InterlockedCompareStore" ||
        name == "InterlockedExchange" ||
        name == "InterlockedMax" ||
        name == "InterlockedMin" ||
        name == "InterlockedOr" ||
        name == "InterlockedXor" ||
        name == "IncrementCounter" ||
        name == "DecrementCounter" ||
        name == "Append" ||
        name == "Consume";
}

//
// Handle seeing a base.field dereference in the grammar, where 'field' is a
// swizzle or member variable.
//
TIntermTyped* HlslParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field)
{
    variableCheck(base);

    if (base->isArray()) {
        error(loc, "cannot apply to an array:", ".", field.c_str());
        return base;
    }

    TIntermTyped* result = base;

    if (base->getType().getBasicType() == EbtSampler) {
        // Handle .mips[mipid][pos] operation on textures
        const TSampler& sampler = base->getType().getSampler();
        if (sampler.isTexture() && field == "mips") {
            // Push a null to signify that we expect a mip level under operator[] next.
            mipsOperatorMipArg.push_back(tMipsOperatorData(loc, nullptr));
            // Keep 'result' pointing to 'base', since we expect an operator[] to go by next.
        } else {
            if (field == "mips")
                error(loc, "unexpected texture type for .mips[][] operator:",
                      base->getType().getCompleteString().c_str(), "");
            else
                error(loc, "unexpected operator on texture type:", field.c_str(),
                      base->getType().getCompleteString().c_str());
        }
    } else if (base->isVector() || base->isScalar()) {
        TSwizzleSelectors<TVectorSelector> selectors;
        parseSwizzleSelector(loc, field, base->getVectorSize(), selectors);

        if (base->isScalar()) {
            if (selectors.size() == 1)
                return result;
            else {
                TType type(base->getBasicType(), EvqTemporary, selectors.size());
                return addConstructor(loc, base, type);
            }
        }
        if (base->getVectorSize() == 1) {
            TType scalarType(base->getBasicType(), EvqTemporary, 1);
            if (selectors.size() == 1)
                return addConstructor(loc, base, scalarType);
            else {
                TType vectorType(base->getBasicType(), EvqTemporary, selectors.size());
                return addConstructor(loc, addConstructor(loc, base, scalarType), vectorType);
            }
        }

        if (base->getType().getQualifier().isFrontEndConstant())
            result = intermediate.foldSwizzle(base, selectors, loc);
        else {
            if (selectors.size() == 1) {
                TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc);
                result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
                result->setType(TType(base->getBasicType(), EvqTemporary));
            } else {
                TIntermTyped* index = intermediate.addSwizzle(selectors, loc);
                result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc);
                result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision,
                                      selectors.size()));
            }
        }
    } else if (base->isMatrix()) {
        TSwizzleSelectors<TMatrixSelector> selectors;
        if (! parseMatrixSwizzleSelector(loc, field, base->getMatrixCols(), base->getMatrixRows(), selectors))
            return result;

        if (selectors.size() == 1) {
            // Representable by m[c][r]
            if (base->getType().getQualifier().isFrontEndConstant()) {
                result = intermediate.foldDereference(base, selectors[0].coord1, loc);
                result = intermediate.foldDereference(result, selectors[0].coord2, loc);
            } else {
                result = intermediate.addIndex(EOpIndexDirect, base,
                                               intermediate.addConstantUnion(selectors[0].coord1, loc),
                                               loc);
                TType dereferencedCol(base->getType(), 0);
                result->setType(dereferencedCol);
                result = intermediate.addIndex(EOpIndexDirect, result,
                                               intermediate.addConstantUnion(selectors[0].coord2, loc),
                                               loc);
                TType dereferenced(dereferencedCol, 0);
                result->setType(dereferenced);
            }
        } else {
            int column = getMatrixComponentsColumn(base->getMatrixRows(), selectors);
            if (column >= 0) {
                // Representable by m[c]
                if (base->getType().getQualifier().isFrontEndConstant())
                    result = intermediate.foldDereference(base, column, loc);
                else {
                    result = intermediate.addIndex(EOpIndexDirect, base, intermediate.addConstantUnion(column, loc),
                                                   loc);
                    TType dereferenced(base->getType(), 0);
                    result->setType(dereferenced);
                }
            } else {
                // general case, not a column, not a single component
                TIntermTyped* index = intermediate.addSwizzle(selectors, loc);
                result = intermediate.addIndex(EOpMatrixSwizzle, base, index, loc);
                result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision,
                                      selectors.size()));
            }
        }
    } else if (base->getBasicType() == EbtStruct || base->getBasicType() == EbtBlock) {
        const TTypeList* fields = base->getType().getStruct();
        bool fieldFound = false;
        int member;
        for (member = 0; member < (int)fields->size(); ++member) {
            if ((*fields)[member].type->getFieldName() == field) {
                fieldFound = true;
                break;
            }
        }
        if (fieldFound) {
            if (base->getAsSymbolNode() && wasFlattened(base)) {
                result = flattenAccess(base, member);
            } else {
                if (base->getType().getQualifier().storage == EvqConst)
                    result = intermediate.foldDereference(base, member, loc);
                else {
                    TIntermTyped* index = intermediate.addConstantUnion(member, loc);
                    result = intermediate.addIndex(EOpIndexDirectStruct, base, index, loc);
                    result->setType(*(*fields)[member].type);
                }
            }
        } else
            error(loc, "no such field in structure", field.c_str(), "");
    } else
        error(loc, "does not apply to this type:", field.c_str(), base->getType().getCompleteString().c_str());

    return result;
}

//
// Return true if the field should be treated as a built-in method.
// Return false otherwise.
//
bool HlslParseContext::isBuiltInMethod(const TSourceLoc&, TIntermTyped* base, const TString& field)
{
    if (base == nullptr)
        return false;

    variableCheck(base);

    if (base->getType().getBasicType() == EbtSampler) {
        return true;
    } else if (isStructBufferType(base->getType()) && isStructBufferMethod(field)) {
        return true;
    } else if (field == "Append" ||
               field == "RestartStrip") {
        // We cannot check the type here: it may be sanitized if we're not compiling a geometry shader, but
        // the code is around in the shader source.
        return true;
    } else
        return false;
}

// Independently establish a built-in that is a member of a structure.
// 'arraySizes' are what's desired for the independent built-in, whatever
// the higher-level source/expression of them was.
void HlslParseContext::splitBuiltIn(const TString& baseName, const TType& memberType, const TArraySizes* arraySizes,
                                    const TQualifier& outerQualifier)
{
    // Because of arrays of structs, we might be asked more than once,
    // but the arraySizes passed in should have captured the whole thing
    // the first time.
    // However, clip/cull rely on multiple updates.
    if (!isClipOrCullDistance(memberType))
        if (splitBuiltIns.find(tInterstageIoData(memberType.getQualifier().builtIn, outerQualifier.storage)) !=
            splitBuiltIns.end())
            return;

    TVariable* ioVar = makeInternalVariable(baseName + "." + memberType.getFieldName(), memberType);

    if (arraySizes != nullptr && !memberType.isArray())
        ioVar->getWritableType().copyArraySizes(*arraySizes);

    splitBuiltIns[tInterstageIoData(memberType.getQualifier().builtIn, outerQualifier.storage)] = ioVar;
    if (!isClipOrCullDistance(ioVar->getType()))
        trackLinkage(*ioVar);

    // Merge qualifier from the user structure
    mergeQualifiers(ioVar->getWritableType().getQualifier(), outerQualifier);

    // Fix the builtin type if needed (e.g., some types require fixed array sizes, no matter how the
    // shader declared them). This is done after mergeQualifiers(), in case fixBuiltInIoType looks
    // at the qualifier to determine e.g., in or out qualifications.
    fixBuiltInIoType(ioVar->getWritableType());

    // But, not location, we're losing that
    ioVar->getWritableType().getQualifier().layoutLocation = TQualifier::layoutLocationEnd;
}

// Split a type into
//   1. a struct of non-I/O members
//   2. a collection of independent I/O variables
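//
// For example, splitting a struct variable "output" whose member "pos" is a
// built-in moves "pos" out (via splitBuiltIn above) as an independent
// variable named "output.pos", leaving the remaining members in the struct.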
void HlslParseContext::split(const TVariable& variable)
{
    // Create a new variable:
    const TType& clonedType = *variable.getType().clone();
    const TType& splitType = split(clonedType, variable.getName(), clonedType.getQualifier());
    splitNonIoVars[variable.getUniqueId()] = makeInternalVariable(variable.getName(), splitType);
}

// Recursive implementation of split().
// Returns reference to the modified type.
const TType& HlslParseContext::split(const TType& type, const TString& name, const TQualifier& outerQualifier)
{
    if (type.isStruct()) {
        TTypeList* userStructure = type.getWritableStruct();
        for (auto ioType = userStructure->begin(); ioType != userStructure->end(); ) {
            if (ioType->type->isBuiltIn()) {
                // move out the built-in
                splitBuiltIn(name, *ioType->type, type.getArraySizes(), outerQualifier);
                ioType = userStructure->erase(ioType);
            } else {
                split(*ioType->type, name + "." + ioType->type->getFieldName(), outerQualifier);
                ++ioType;
            }
        }
    }

    return type;
}

// Is this an aggregate that should be flattened?
// Can be applied to intermediate levels of type in a hierarchy.
// Some things like flattening uniform arrays are only about the top level
// of the aggregate, triggered on 'topLevel'.
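//
// For example, entry-point varying (in/out) structs and arrays are always
// flattened; uniforms are flattened only for top-level arrays (when uniform
// array flattening is enabled) or for structs containing opaque types.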
bool HlslParseContext::shouldFlatten(const TType& type, TStorageQualifier qualifier, bool topLevel) const
{
    switch (qualifier) {
    case EvqVaryingIn:
    case EvqVaryingOut:
        return type.isStruct() || type.isArray();
    case EvqUniform:
        return (type.isArray() && intermediate.getFlattenUniformArrays() && topLevel) ||
               (type.isStruct() && type.containsOpaque());
    default:
        return false;
    }
}

// Top level variable flattening: construct data
void HlslParseContext::flatten(const TVariable& variable, bool linkage, bool arrayed)
{
    const TType& type = variable.getType();

    // If it's a standalone built-in, there is nothing to flatten
    if (type.isBuiltIn() && !type.isStruct())
        return;

    auto entry = flattenMap.insert(std::make_pair(variable.getUniqueId(),
                                                  TFlattenData(type.getQualifier().layoutBinding,
                                                               type.getQualifier().layoutLocation)));

    if (type.isStruct() && type.getStruct()->size() == 0)
        return;

    // if flattening arrayed io struct, array each member of dereferenced type
    if (arrayed) {
        const TType dereferencedType(type, 0);
        flatten(variable, dereferencedType, entry.first->second, variable.getName(), linkage,
                type.getQualifier(), type.getArraySizes());
    } else {
        flatten(variable, type, entry.first->second, variable.getName(), linkage,
                type.getQualifier(), nullptr);
    }
}

// Recursively flatten the given variable at the provided type, building the flattenData as we go.
//
// This is mutually recursive with flattenStruct and flattenArray.
// We are going to flatten an arbitrarily nested composite structure into a linear sequence of
// members, and later on, we want to turn a path through the tree structure into a final
// location in this linear sequence.
//
// If the tree was N-ary, that can be directly calculated. However, we are dealing with
// arbitrary numbers - perhaps a struct of 7 members containing an array of 3. Thus, we must
// build a data structure to allow the sequence of bracket and dot operators on arrays and
// structs to arrive at the proper member.
//
// To avoid storing a tree with pointers, we are going to flatten the tree into a vector of integers.
// The leaves are the indexes into the flattened member array.
// Each level will have the next location for the Nth item stored sequentially, so for instance:
//
//   struct { float2 a[2]; int b; float4 c[3] };
//
// This will produce the following flattened tree:
//   Pos:  0  1  2   3  4   5  6  7   8   9  10  11  12  13
//       { 3, 7, 8,  5, 6,  0, 1, 2,  11, 12, 13,  3,  4,  5 }
//
// Given a reference to mystruct.c[1], the access chain is (2,1), so we traverse:
//   (0+2) = 8  -->  (8+1) = 12 -->  12 = 4
//
// so the 4th flattened member in traversal order is ours.
//
int HlslParseContext::flatten(const TVariable& variable, const TType& type,
                              TFlattenData& flattenData, TString name, bool linkage,
                              const TQualifier& outerQualifier,
                              const TArraySizes* builtInArraySizes)
{
    // If something is an arrayed struct, the array flattener will recursively call flatten()
    // to then flatten the struct, so this is an "if else": we don't do both.
    if (type.isArray())
        return flattenArray(variable, type, flattenData, name, linkage, outerQualifier);
    else if (type.isStruct())
        return flattenStruct(variable, type, flattenData, name, linkage, outerQualifier, builtInArraySizes);
    else {
        assert(0); // should never happen
        return -1;
    }
}

// Add a single flattened member to the flattened data being tracked for the composite
// Returns the position of the member reference at the final flattening level.
int HlslParseContext::addFlattenedMember(const TVariable& variable, const TType& type, TFlattenData& flattenData,
                                         const TString& memberName, bool linkage,
                                         const TQualifier& outerQualifier,
                                         const TArraySizes* builtInArraySizes)
{
    if (!shouldFlatten(type, outerQualifier.storage, false)) {
        // This is as far as we flatten. Insert the variable.
        TVariable* memberVariable = makeInternalVariable(memberName, type);
        mergeQualifiers(memberVariable->getWritableType().getQualifier(), variable.getType().getQualifier());

        if (flattenData.nextBinding != TQualifier::layoutBindingEnd)
            memberVariable->getWritableType().getQualifier().layoutBinding = flattenData.nextBinding++;

        if (memberVariable->getType().isBuiltIn()) {
            // inherited locations are nonsensical for built-ins (TODO: what if semantic had a number)
            memberVariable->getWritableType().getQualifier().layoutLocation = TQualifier::layoutLocationEnd;
        } else {
            // inherited locations must be auto bumped, not replicated
            if (flattenData.nextLocation != TQualifier::layoutLocationEnd) {
                memberVariable->getWritableType().getQualifier().layoutLocation = flattenData.nextLocation;
                flattenData.nextLocation += intermediate.computeTypeLocationSize(memberVariable->getType(), language);
                nextOutLocation = std::max(nextOutLocation, flattenData.nextLocation);
            }
        }

        // Only propagate arraysizes here for arrayed io
        if (variable.getType().getQualifier().isArrayedIo(language) && builtInArraySizes != nullptr)
            memberVariable->getWritableType().copyArraySizes(*builtInArraySizes);

        flattenData.offsets.push_back(static_cast<int>(flattenData.members.size()));
        flattenData.members.push_back(memberVariable);

        if (linkage)
            trackLinkage(*memberVariable);

        return static_cast<int>(flattenData.offsets.size()) - 1; // location of the member reference
    } else {
        // Further recursion required
        return flatten(variable, type, flattenData, memberName, linkage, outerQualifier, builtInArraySizes);
    }
}

// Figure out the mapping between an aggregate's top members and an
// equivalent set of individual variables.
//
// Assumes shouldFlatten() or equivalent was called first.
int HlslParseContext::flattenStruct(const TVariable& variable, const TType& type,
                                    TFlattenData& flattenData, TString name, bool linkage,
                                    const TQualifier& outerQualifier,
                                    const TArraySizes* builtInArraySizes)
{
    assert(type.isStruct());

    auto members = *type.getStruct();

    // Reserve space for this tree level.
    int start = static_cast<int>(flattenData.offsets.size());
    int pos = start;
    flattenData.offsets.resize(int(pos + members.size()), -1);

    for (int member = 0; member < (int)members.size(); ++member) {
        TType& dereferencedType = *members[member].type;
        if (dereferencedType.isBuiltIn())
            splitBuiltIn(variable.getName(), dereferencedType, builtInArraySizes, outerQualifier);
        else {
            const int mpos = addFlattenedMember(variable, dereferencedType, flattenData,
                                                name + "." + dereferencedType.getFieldName(),
                                                linkage, outerQualifier,
                                                builtInArraySizes == nullptr && dereferencedType.isArray()
                                                    ? dereferencedType.getArraySizes()
                                                    : builtInArraySizes);
            flattenData.offsets[pos++] = mpos;
        }
    }

    return start;
}

// Figure out mapping between an array's members and an
// equivalent set of individual variables.
//
// Assumes shouldFlatten() or equivalent was called first.
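//
// For example, a member "float4 foo[2]" is flattened into two variables
// named "foo[0]" and "foo[1]".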
flattenArray(const TVariable & variable,const TType & type,TFlattenData & flattenData,TString name,bool linkage,const TQualifier & outerQualifier)1326 int HlslParseContext::flattenArray(const TVariable& variable, const TType& type,
1327 TFlattenData& flattenData, TString name, bool linkage,
1328 const TQualifier& outerQualifier)
1329 {
1330 assert(type.isSizedArray());
1331
1332 const int size = type.getOuterArraySize();
1333 const TType dereferencedType(type, 0);
1334
1335 if (name.empty())
1336 name = variable.getName();
1337
1338 // Reserve space for this tree level.
1339 int start = static_cast<int>(flattenData.offsets.size());
1340 int pos = start;
1341 flattenData.offsets.resize(int(pos + size), -1);
1342
1343 for (int element=0; element < size; ++element) {
1344 char elementNumBuf[20]; // sufficient for MAXINT
1345 snprintf(elementNumBuf, sizeof(elementNumBuf)-1, "[%d]", element);
1346 const int mpos = addFlattenedMember(variable, dereferencedType, flattenData,
1347 name + elementNumBuf, linkage, outerQualifier,
1348 type.getArraySizes());
1349
1350 flattenData.offsets[pos++] = mpos;
1351 }
1352
1353 return start;
1354 }
1355
1356 // Return true if we have flattened this node.
wasFlattened(const TIntermTyped * node) const1357 bool HlslParseContext::wasFlattened(const TIntermTyped* node) const
1358 {
1359 return node != nullptr && node->getAsSymbolNode() != nullptr &&
1360 wasFlattened(node->getAsSymbolNode()->getId());
1361 }
1362
1363 // Return true if we have split this structure
wasSplit(const TIntermTyped * node) const1364 bool HlslParseContext::wasSplit(const TIntermTyped* node) const
1365 {
1366 return node != nullptr && node->getAsSymbolNode() != nullptr &&
1367 wasSplit(node->getAsSymbolNode()->getId());
1368 }
1369
1370 // Turn an access into an aggregate that was flattened to instead be
1371 // an access to the individual variable the member was flattened to.
1372 // Assumes wasFlattened() or equivalent was called first.
TIntermTyped* HlslParseContext::flattenAccess(TIntermTyped* base, int member)
{
    const TType dereferencedType(base->getType(), member); // dereferenced type
    const TIntermSymbol& symbolNode = *base->getAsSymbolNode();
    TIntermTyped* flattened = flattenAccess(symbolNode.getId(), member, base->getQualifier().storage,
                                            dereferencedType, symbolNode.getFlattenSubset());

    return flattened ? flattened : base;
}

TIntermTyped* HlslParseContext::flattenAccess(long long uniqueId, int member, TStorageQualifier outerStorage,
                                              const TType& dereferencedType, int subset)
{
    const auto flattenData = flattenMap.find(uniqueId);

    if (flattenData == flattenMap.end())
        return nullptr;

    // Calculate new cumulative offset from the packed tree
    int newSubset = flattenData->second.offsets[subset >= 0 ? subset + member : member];

    TIntermSymbol* subsetSymbol;
    if (!shouldFlatten(dereferencedType, outerStorage, false)) {
        // Finished flattening: create symbol for variable
        member = flattenData->second.offsets[newSubset];
        const TVariable* memberVariable = flattenData->second.members[member];
        subsetSymbol = intermediate.addSymbol(*memberVariable);
        subsetSymbol->setFlattenSubset(-1);
    } else {
        // If this is not the final flattening, accumulate the position and return
        // an object of the partially dereferenced type.
        subsetSymbol = new TIntermSymbol(uniqueId, "flattenShadow", dereferencedType);
        subsetSymbol->setFlattenSubset(newSubset);
    }

    return subsetSymbol;
}

// For finding where the first leaf is in a subtree of a multi-level aggregate
// that is just getting a subset assigned. Follows the same logic as flattenAccess,
// but logically going down the "left-most" tree branch each step of the way.
//
// Returns the offset into the first leaf of the subset.
int HlslParseContext::findSubtreeOffset(const TIntermNode& node) const
{
    const TIntermSymbol* sym = node.getAsSymbolNode();
    if (sym == nullptr)
        return 0;
    if (!sym->isArray() && !sym->isStruct())
        return 0;
    int subset = sym->getFlattenSubset();
    if (subset == -1)
        return 0;

    // Getting this far means a partial aggregate is identified by the flatten subset.
    // Find the first leaf of the subset.

    const auto flattenData = flattenMap.find(sym->getId());
    if (flattenData == flattenMap.end())
        return 0;

    return findSubtreeOffset(sym->getType(), subset, flattenData->second.offsets);
}

// Recursively do the descent.
int HlslParseContext::findSubtreeOffset(const TType& type, int subset, const TVector<int>& offsets) const
{
    if (!type.isArray() && !type.isStruct())
        return offsets[subset];
    TType derefType(type, 0);
    return findSubtreeOffset(derefType, offsets[subset], offsets);
}

// Find and return the split non-I/O TVariable for id, or nullptr if none.
TVariable* HlslParseContext::getSplitNonIoVar(long long id) const
{
    const auto splitNonIoVar = splitNonIoVars.find(id);
    if (splitNonIoVar == splitNonIoVars.end())
        return nullptr;

    return splitNonIoVar->second;
}

// Pass through to base class after remembering built-in mappings.
void HlslParseContext::trackLinkage(TSymbol& symbol)
{
    TBuiltInVariable biType = symbol.getType().getQualifier().builtIn;

    if (biType != EbvNone)
        builtInTessLinkageSymbols[biType] = symbol.clone();

    TParseContextBase::trackLinkage(symbol);
}

// Returns true if the built-in is a clip or cull distance variable.
bool HlslParseContext::isClipOrCullDistance(TBuiltInVariable builtIn)
{
    return builtIn == EbvClipDistance || builtIn == EbvCullDistance;
}

// Some types require fixed array sizes in SPIR-V, but can be scalars or
// arrays of sizes SPIR-V doesn't allow. For example, tessellation factors.
// This creates the right size. A conversion is performed when the internal
// type is copied to or from the external type, so that the externally
// facing input or output type abides by downstream (SPIR-V) semantics.
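//
// For example (illustrative): the outer tessellation factors must be a
// 4-element array in SPIR-V even when the shader declares fewer, so a
// declaration such as
//     float edges[3] : SV_TessFactor;
// gets an external type of float[4], with conversions on the copies between
// the internal and external forms.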
void HlslParseContext::fixBuiltInIoType(TType& type)
{
    int requiredArraySize = 0;
    int requiredVectorSize = 0;

    switch (type.getQualifier().builtIn) {
    case EbvTessLevelOuter: requiredArraySize = 4; break;
    case EbvTessLevelInner: requiredArraySize = 2; break;

    case EbvSampleMask:
        {
            // Promote scalar to array of size 1. Leave existing arrays alone.
            if (!type.isArray())
                requiredArraySize = 1;
            break;
        }

    case EbvWorkGroupId:        requiredVectorSize = 3; break;
    case EbvGlobalInvocationId: requiredVectorSize = 3; break;
    case EbvLocalInvocationId:  requiredVectorSize = 3; break;
    case EbvTessCoord:          requiredVectorSize = 3; break;

    default:
        if (isClipOrCullDistance(type)) {
            const int loc = type.getQualifier().layoutLocation;

            if (type.getQualifier().builtIn == EbvClipDistance) {
                if (type.getQualifier().storage == EvqVaryingIn)
                    clipSemanticNSizeIn[loc] = type.getVectorSize();
                else
                    clipSemanticNSizeOut[loc] = type.getVectorSize();
            } else {
                if (type.getQualifier().storage == EvqVaryingIn)
                    cullSemanticNSizeIn[loc] = type.getVectorSize();
                else
                    cullSemanticNSizeOut[loc] = type.getVectorSize();
            }
        }

        return;
    }

    // Alter or set vector size as needed.
    if (requiredVectorSize > 0) {
        TType newType(type.getBasicType(), type.getQualifier().storage, requiredVectorSize);
        newType.getQualifier() = type.getQualifier();

        type.shallowCopy(newType);
    }

    // Alter or set array size as needed.
    if (requiredArraySize > 0) {
        if (!type.isArray() || type.getOuterArraySize() != requiredArraySize) {
            TArraySizes* arraySizes = new TArraySizes;
            arraySizes->addInnerSize(requiredArraySize);
            type.transferArraySizes(arraySizes);
        }
    }
}

// Variables that correspond to the user-interface in and out of a stage
// (not the built-in interface) are
//  - assigned locations
//  - registered as a linkage node (part of the stage's external interface).
// Assumes it is called in the order in which locations should be assigned.
void HlslParseContext::assignToInterface(TVariable& variable)
{
    const auto assignLocation = [&](TVariable& variable) {
        TType& type = variable.getWritableType();
        if (!type.isStruct() || type.getStruct()->size() > 0) {
            TQualifier& qualifier = type.getQualifier();
            if (qualifier.storage == EvqVaryingIn || qualifier.storage == EvqVaryingOut) {
                if (qualifier.builtIn == EbvNone && !qualifier.hasLocation()) {
                    // Strip off the outer array dimension for those having an extra one.
                    int size;
                    if (type.isArray() && qualifier.isArrayedIo(language)) {
                        TType elementType(type, 0);
                        size = intermediate.computeTypeLocationSize(elementType, language);
                    } else
                        size = intermediate.computeTypeLocationSize(type, language);

                    if (qualifier.storage == EvqVaryingIn) {
                        variable.getWritableType().getQualifier().layoutLocation = nextInLocation;
                        nextInLocation += size;
                    } else {
                        variable.getWritableType().getQualifier().layoutLocation = nextOutLocation;
                        nextOutLocation += size;
                    }
                }
                trackLinkage(variable);
            }
        }
    };

    if (wasFlattened(variable.getUniqueId())) {
        auto& memberList = flattenMap[variable.getUniqueId()].members;
        for (auto member = memberList.begin(); member != memberList.end(); ++member)
            assignLocation(**member);
    } else if (wasSplit(variable.getUniqueId())) {
        TVariable* splitIoVar = getSplitNonIoVar(variable.getUniqueId());
        assignLocation(*splitIoVar);
    } else {
        assignLocation(variable);
    }
}

//
// Handle seeing a function declarator in the grammar. This is the precursor
// to recognizing a function prototype or function definition.
//
void HlslParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype)
{
    //
    // Multiple declarations of the same function name are allowed.
    //
    // If this is a definition, the definition production code will check for redefinitions
    // (we don't know at this point if it's a definition or not).
    //
    bool builtIn;
    TSymbol* symbol = symbolTable.find(function.getMangledName(), &builtIn);
    const TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr;

    if (prototype) {
        // All built-in functions are defined, even though they don't have a body.
        // Count their prototype as a definition instead.
        if (symbolTable.atBuiltInLevel())
            function.setDefined();
        else {
            if (prevDec && ! builtIn)
                symbol->getAsFunction()->setPrototyped(); // need a writable one, though we prefer keeping prevDec const
            function.setPrototyped();
        }
    }

    // This insert won't actually insert it if it's a duplicate signature, but it will still check for
    // other forms of name collisions.
    if (! symbolTable.insert(function))
        error(loc, "function name is redeclaration of existing name", function.getName().c_str(), "");
}

// For struct buffers with counters, we must pass the counter buffer as a hidden parameter.
// This adds the hidden parameter to the parameter list in 'paramNodes' if needed.
// Otherwise, this is a no-op.
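//
// For example (illustrative): given a parameter such as
//     RWStructuredBuffer<int> buf   // where the body calls buf.IncrementCounter()
// the counter buffer associated with 'buf' (named via
// intermediate.addCounterBufferName()) is appended as an extra, hidden parameter.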
void HlslParseContext::addStructBufferHiddenCounterParam(const TSourceLoc& loc, TParameter& param,
                                                         TIntermAggregate*& paramNodes)
{
    if (! hasStructBuffCounter(*param.type))
        return;

    const TString counterBlockName(intermediate.addCounterBufferName(*param.name));

    TType counterType;
    counterBufferType(loc, counterType);
    TVariable* variable = makeInternalVariable(counterBlockName, counterType);

    if (! symbolTable.insert(*variable))
        error(loc, "redefinition", variable->getName().c_str(), "");

    paramNodes = intermediate.growAggregate(paramNodes,
                                            intermediate.addSymbol(*variable, loc),
                                            loc);
}

//
// Handle seeing the function prototype in front of a function definition in the grammar.
// The body is handled after this function returns.
//
// Returns an aggregate of parameter-symbol nodes.
//
TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function,
                                                             const TAttributes& attributes,
                                                             TIntermNode*& entryPointTree)
{
    currentCaller = function.getMangledName();
    TSymbol* symbol = symbolTable.find(function.getMangledName());
    TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr;

    if (prevDec == nullptr)
        error(loc, "can't find function", function.getName().c_str(), "");
    // Note: 'prevDec' could be 'function' if this is the first time we've seen function
    // as it would have just been put in the symbol table. Otherwise, we're looking up
    // an earlier occurrence.

    if (prevDec && prevDec->isDefined()) {
        // Then this function already has a body.
        error(loc, "function already has a body", function.getName().c_str(), "");
    }
    if (prevDec && ! prevDec->isDefined()) {
        prevDec->setDefined();

        // Remember the return type for later checking for RETURN statements.
        currentFunctionType = &(prevDec->getType());
    } else
        currentFunctionType = new TType(EbtVoid);
    functionReturnsValue = false;

    // Entry points need different I/O and other handling, transform it so the
    // rest of this function doesn't care.
    entryPointTree = transformEntryPoint(loc, function, attributes);

    //
    // New symbol table scope for body of function plus its arguments
    //
    pushScope();

    //
    // Insert parameters into the symbol table.
    // If the parameter has no name, it's not an error, just don't insert it
    // (could be used for unused args).
    //
    // Also, accumulate the list of parameters into the AST, so lower level code
    // knows where to find parameters.
    //
    TIntermAggregate* paramNodes = new TIntermAggregate;
    for (int i = 0; i < function.getParamCount(); i++) {
        TParameter& param = function[i];
        if (param.name != nullptr) {
            TVariable* variable = new TVariable(param.name, *param.type);

            if (i == 0 && function.hasImplicitThis()) {
                // Anonymous 'this' members are already in a symbol-table level,
                // and we need to know what function parameter to map them to.
                symbolTable.makeInternalVariable(*variable);
                pushImplicitThis(variable);
            }

            // Insert the parameters with name in the symbol table.
            if (! symbolTable.insert(*variable))
                error(loc, "redefinition", variable->getName().c_str(), "");

            // Add parameters to the AST list.
            if (shouldFlatten(variable->getType(), variable->getType().getQualifier().storage, true)) {
                // Expand the AST parameter nodes (but not the name mangling or symbol table view)
                // for structures that need to be flattened.
                flatten(*variable, false);
                const TTypeList* structure = variable->getType().getStruct();
                for (int mem = 0; mem < (int)structure->size(); ++mem) {
                    paramNodes = intermediate.growAggregate(paramNodes,
                                                            flattenAccess(variable->getUniqueId(), mem,
                                                                          variable->getType().getQualifier().storage,
                                                                          *(*structure)[mem].type),
                                                            loc);
                }
            } else {
                // Add the parameter to the AST
                paramNodes = intermediate.growAggregate(paramNodes,
                                                        intermediate.addSymbol(*variable, loc),
                                                        loc);
            }

            // Add hidden AST parameter for struct buffer counters, if needed.
            addStructBufferHiddenCounterParam(loc, param, paramNodes);
        } else
            paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc);
    }
    if (function.hasIllegalImplicitThis())
        pushImplicitThis(nullptr);

    intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc);
    loopNestingLevel = 0;
    controlFlowNestingLevel = 0;
    postEntryPointReturn = false;

    return paramNodes;
}

// Handle all [attrib] attributes for the shader entry point.
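//
// For example (illustrative), attributes such as
//     [numthreads(8, 8, 1)]      // compute workgroup size
//     [maxvertexcount(3)]        // geometry-shader output limit
//     [domain("tri")]            // tessellation domain
// are translated here into the corresponding TIntermediate settings.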
void HlslParseContext::handleEntryPointAttributes(const TSourceLoc& loc, const TAttributes& attributes)
{
    for (auto it = attributes.begin(); it != attributes.end(); ++it) {
        switch (it->name) {
        case EatNumThreads:
        {
            const TIntermSequence& sequence = it->args->getSequence();
            for (int lid = 0; lid < int(sequence.size()); ++lid)
                intermediate.setLocalSize(lid, sequence[lid]->getAsConstantUnion()->getConstArray()[0].getIConst());
            break;
        }
        case EatInstance:
        {
            int invocations;

            if (!it->getInt(invocations)) {
                error(loc, "invalid instance", "", "");
            } else {
                if (!intermediate.setInvocations(invocations))
                    error(loc, "cannot change previously set instance attribute", "", "");
            }
            break;
        }
        case EatMaxVertexCount:
        {
            int maxVertexCount;

            if (! it->getInt(maxVertexCount)) {
                error(loc, "invalid maxvertexcount", "", "");
            } else {
                if (! intermediate.setVertices(maxVertexCount))
                    error(loc, "cannot change previously set maxvertexcount attribute", "", "");
            }
            break;
        }
        case EatPatchConstantFunc:
        {
            TString pcfName;
            if (! it->getString(pcfName, 0, false)) {
                error(loc, "invalid patch constant function", "", "");
            } else {
                patchConstantFunctionName = pcfName;
            }
            break;
        }
        case EatDomain:
        {
            // Handle [domain("...")]
            TString domainStr;
            if (! it->getString(domainStr)) {
                error(loc, "invalid domain", "", "");
            } else {
                TLayoutGeometry domain = ElgNone;

                if (domainStr == "tri") {
                    domain = ElgTriangles;
                } else if (domainStr == "quad") {
                    domain = ElgQuads;
                } else if (domainStr == "isoline") {
                    domain = ElgIsolines;
                } else {
                    error(loc, "unsupported domain type", domainStr.c_str(), "");
                }

                if (language == EShLangTessEvaluation) {
                    if (! intermediate.setInputPrimitive(domain))
                        error(loc, "cannot change previously set domain", TQualifier::getGeometryString(domain), "");
                } else {
                    if (! intermediate.setOutputPrimitive(domain))
                        error(loc, "cannot change previously set domain", TQualifier::getGeometryString(domain), "");
                }
            }
            break;
        }
        case EatOutputTopology:
        {
            // Handle [outputtopology("...")]
            TString topologyStr;
            if (! it->getString(topologyStr)) {
                error(loc, "invalid outputtopology", "", "");
            } else {
                TVertexOrder vertexOrder = EvoNone;
                TLayoutGeometry primitive = ElgNone;

                if (topologyStr == "point") {
                    intermediate.setPointMode();
                } else if (topologyStr == "line") {
                    primitive = ElgIsolines;
                } else if (topologyStr == "triangle_cw") {
                    vertexOrder = EvoCw;
                    primitive = ElgTriangles;
                } else if (topologyStr == "triangle_ccw") {
                    vertexOrder = EvoCcw;
                    primitive = ElgTriangles;
                } else {
                    error(loc, "unsupported outputtopology type", topologyStr.c_str(), "");
                }

                if (vertexOrder != EvoNone) {
                    if (! intermediate.setVertexOrder(vertexOrder)) {
                        error(loc, "cannot change previously set outputtopology",
                              TQualifier::getVertexOrderString(vertexOrder), "");
                    }
                }
                if (primitive != ElgNone)
                    intermediate.setOutputPrimitive(primitive);
            }
            break;
        }
        case EatPartitioning:
        {
            // Handle [partitioning("...")]
            TString partitionStr;
            if (! it->getString(partitionStr)) {
                error(loc, "invalid partitioning", "", "");
            } else {
                TVertexSpacing partitioning = EvsNone;

                if (partitionStr == "integer") {
                    partitioning = EvsEqual;
                } else if (partitionStr == "fractional_even") {
                    partitioning = EvsFractionalEven;
                } else if (partitionStr == "fractional_odd") {
                    partitioning = EvsFractionalOdd;
                //} else if (partitionStr == "pow2") { // TODO: currently nothing to map this to.
                } else {
                    error(loc, "unsupported partitioning type", partitionStr.c_str(), "");
                }

                if (! intermediate.setVertexSpacing(partitioning))
                    error(loc, "cannot change previously set partitioning",
                          TQualifier::getVertexSpacingString(partitioning), "");
            }
            break;
        }
        case EatOutputControlPoints:
        {
            // Handle [outputcontrolpoints("...")]
            int ctrlPoints;
            if (! it->getInt(ctrlPoints)) {
                error(loc, "invalid outputcontrolpoints", "", "");
            } else {
                if (! intermediate.setVertices(ctrlPoints)) {
                    error(loc, "cannot change previously set outputcontrolpoints attribute", "", "");
                }
            }
            break;
        }
        case EatEarlyDepthStencil:
            intermediate.setEarlyFragmentTests();
            break;
        case EatBuiltIn:
        case EatLocation:
            // tolerate these because of dual use of entrypoint and type attributes
            break;
        default:
            warn(loc, "attribute does not apply to entry point", "", "");
            break;
        }
    }
}

// Update the given type with any type-like attribute information in the
// attributes.
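//
// For example (illustrative), a Vulkan-style attribute on a declaration like
//     [[vk::binding(2, 0)]] Texture2D tex;
// sets layoutBinding and layoutSet on the type's qualifier here.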
void HlslParseContext::transferTypeAttributes(const TSourceLoc& loc, const TAttributes& attributes, TType& type,
                                              bool allowEntry)
{
    if (attributes.size() == 0)
        return;

    int value;
    TString builtInString;
    for (auto it = attributes.begin(); it != attributes.end(); ++it) {
        switch (it->name) {
        case EatLocation:
            // location
            if (it->getInt(value))
                type.getQualifier().layoutLocation = value;
            else
                error(loc, "needs a literal integer", "location", "");
            break;
        case EatBinding:
            // binding
            if (it->getInt(value)) {
                type.getQualifier().layoutBinding = value;
                type.getQualifier().layoutSet = 0;
            } else
                error(loc, "needs a literal integer", "binding", "");
            // set
            if (it->getInt(value, 1))
                type.getQualifier().layoutSet = value;
            break;
        case EatGlobalBinding:
            // global cbuffer binding
            if (it->getInt(value))
                globalUniformBinding = value;
            else
                error(loc, "needs a literal integer", "global binding", "");
            // global cbuffer set
            if (it->getInt(value, 1))
                globalUniformSet = value;
            break;
        case EatInputAttachment:
            // input attachment
            if (it->getInt(value))
                type.getQualifier().layoutAttachment = value;
            else
                error(loc, "needs a literal integer", "input attachment", "");
            break;
        case EatBuiltIn:
            // PointSize built-in
            if (it->getString(builtInString, 0, false)) {
                if (builtInString == "PointSize")
                    type.getQualifier().builtIn = EbvPointSize;
            }
            break;
        case EatPushConstant:
            // push_constant
            type.getQualifier().layoutPushConstant = true;
            break;
        case EatConstantId:
            // specialization constant
            if (type.getQualifier().storage != EvqConst) {
                error(loc, "needs a const type", "constant_id", "");
                break;
            }
            if (it->getInt(value)) {
                TSourceLoc loc;
                loc.init();
                setSpecConstantId(loc, type.getQualifier(), value);
            }
            break;

        // image formats
        case EatFormatRgba32f:      type.getQualifier().layoutFormat = ElfRgba32f;      break;
        case EatFormatRgba16f:      type.getQualifier().layoutFormat = ElfRgba16f;      break;
        case EatFormatR32f:         type.getQualifier().layoutFormat = ElfR32f;         break;
        case EatFormatRgba8:        type.getQualifier().layoutFormat = ElfRgba8;        break;
        case EatFormatRgba8Snorm:   type.getQualifier().layoutFormat = ElfRgba8Snorm;   break;
        case EatFormatRg32f:        type.getQualifier().layoutFormat = ElfRg32f;        break;
        case EatFormatRg16f:        type.getQualifier().layoutFormat = ElfRg16f;        break;
        case EatFormatR11fG11fB10f: type.getQualifier().layoutFormat = ElfR11fG11fB10f; break;
        case EatFormatR16f:         type.getQualifier().layoutFormat = ElfR16f;         break;
        case EatFormatRgba16:       type.getQualifier().layoutFormat = ElfRgba16;       break;
        case EatFormatRgb10A2:      type.getQualifier().layoutFormat = ElfRgb10A2;      break;
        case EatFormatRg16:         type.getQualifier().layoutFormat = ElfRg16;         break;
        case EatFormatRg8:          type.getQualifier().layoutFormat = ElfRg8;          break;
        case EatFormatR16:          type.getQualifier().layoutFormat = ElfR16;          break;
        case EatFormatR8:           type.getQualifier().layoutFormat = ElfR8;           break;
        case EatFormatRgba16Snorm:  type.getQualifier().layoutFormat = ElfRgba16Snorm;  break;
        case EatFormatRg16Snorm:    type.getQualifier().layoutFormat = ElfRg16Snorm;    break;
        case EatFormatRg8Snorm:     type.getQualifier().layoutFormat = ElfRg8Snorm;     break;
        case EatFormatR16Snorm:     type.getQualifier().layoutFormat = ElfR16Snorm;     break;
        case EatFormatR8Snorm:      type.getQualifier().layoutFormat = ElfR8Snorm;      break;
        case EatFormatRgba32i:      type.getQualifier().layoutFormat = ElfRgba32i;      break;
        case EatFormatRgba16i:      type.getQualifier().layoutFormat = ElfRgba16i;      break;
        case EatFormatRgba8i:       type.getQualifier().layoutFormat = ElfRgba8i;       break;
        case EatFormatR32i:         type.getQualifier().layoutFormat = ElfR32i;         break;
        case EatFormatRg32i:        type.getQualifier().layoutFormat = ElfRg32i;        break;
        case EatFormatRg16i:        type.getQualifier().layoutFormat = ElfRg16i;        break;
        case EatFormatRg8i:         type.getQualifier().layoutFormat = ElfRg8i;         break;
        case EatFormatR16i:         type.getQualifier().layoutFormat = ElfR16i;         break;
        case EatFormatR8i:          type.getQualifier().layoutFormat = ElfR8i;          break;
        case EatFormatRgba32ui:     type.getQualifier().layoutFormat = ElfRgba32ui;     break;
        case EatFormatRgba16ui:     type.getQualifier().layoutFormat = ElfRgba16ui;     break;
        case EatFormatRgba8ui:      type.getQualifier().layoutFormat = ElfRgba8ui;      break;
        case EatFormatR32ui:        type.getQualifier().layoutFormat = ElfR32ui;        break;
        case EatFormatRgb10a2ui:    type.getQualifier().layoutFormat = ElfRgb10a2ui;    break;
        case EatFormatRg32ui:       type.getQualifier().layoutFormat = ElfRg32ui;       break;
        case EatFormatRg16ui:       type.getQualifier().layoutFormat = ElfRg16ui;       break;
        case EatFormatRg8ui:        type.getQualifier().layoutFormat = ElfRg8ui;        break;
        case EatFormatR16ui:        type.getQualifier().layoutFormat = ElfR16ui;        break;
        case EatFormatR8ui:         type.getQualifier().layoutFormat = ElfR8ui;         break;
        case EatFormatUnknown:      type.getQualifier().layoutFormat = ElfNone;         break;

        case EatNonWritable: type.getQualifier().readonly = true;  break;
        case EatNonReadable: type.getQualifier().writeonly = true; break;

        default:
            if (! allowEntry)
                warn(loc, "attribute does not apply to a type", "", "");
            break;
        }
    }
}

//
// Do all special handling for the entry point, including wrapping
// the shader's entry point with the official entry point that will call it.
//
// The following:
//
//    retType shaderEntryPoint(args...) // shader declared entry point
//    { body }
//
// becomes:
//
//    out retType ret;
//    in iargs<that are input>...;
//    out oargs<that are output>...;
//
//    void shaderEntryPoint()    // synthesized, but official, entry point
//    {
//        args<that are input> = iargs...;
//        ret = @shaderEntryPoint(args...);
//        oargs = args<that are output>...;
//    }
//    retType @shaderEntryPoint(args...)
//    { body }
//
// The symbol table will still map the original entry point name to the
// modified function and its new name:
//
//    symbol table:  shaderEntryPoint  ->  @shaderEntryPoint
//
// Returns nullptr if no entry-point tree was built; otherwise, returns
// a subtree that creates the entry point.
//
TIntermNode* HlslParseContext::transformEntryPoint(const TSourceLoc& loc, TFunction& userFunction,
                                                   const TAttributes& attributes)
{
    // Return true if this is a tessellation patch constant function input to a domain shader.
    const auto isDsPcfInput = [this](const TType& type) {
        return language == EShLangTessEvaluation &&
               type.contains([](const TType* t) {
                   return t->getQualifier().builtIn == EbvTessLevelOuter ||
                          t->getQualifier().builtIn == EbvTessLevelInner;
               });
    };

    // If we aren't in the entry point, fix the I/O as such and exit.
    if (! isEntrypointName(userFunction.getName())) {
        remapNonEntryPointIO(userFunction);
        return nullptr;
    }

    entryPointFunction = &userFunction; // needed in finish()

    // Handle entry point attributes
    handleEntryPointAttributes(loc, attributes);

    // entry point logic...

    // Move parameters and return value to shader in/out
    TVariable* entryPointOutput; // gets created in remapEntryPointIO
    TVector<TVariable*> inputs;
    TVector<TVariable*> outputs;
    remapEntryPointIO(userFunction, entryPointOutput, inputs, outputs);

    // Further this return/in/out transform by flattening, splitting, and assigning locations
    const auto makeVariableInOut = [&](TVariable& variable) {
        if (variable.getType().isStruct()) {
            bool arrayed = variable.getType().getQualifier().isArrayedIo(language);
            flatten(variable, false /* don't track linkage here, it will be tracked in assignToInterface() */, arrayed);
        }
        // TODO: flatten arrays too
        // TODO: flatten everything in I/O
        // TODO: replace all split with flatten, make all paths able to create flattened I/O, then remove the split code

        // For clip and cull distance, multiple output variables potentially get merged
        // into one in assignClipCullDistance. That code in assignClipCullDistance
        // handles the interface logic, so we avoid it here in that case.
        if (!isClipOrCullDistance(variable.getType()))
            assignToInterface(variable);
    };
    if (entryPointOutput != nullptr)
        makeVariableInOut(*entryPointOutput);
    for (auto it = inputs.begin(); it != inputs.end(); ++it)
        if (!isDsPcfInput((*it)->getType())) // wait until the end for PCF input (see comment below)
            makeVariableInOut(*(*it));
    for (auto it = outputs.begin(); it != outputs.end(); ++it)
        makeVariableInOut(*(*it));

    // In the domain shader, PCF input must be at the end of the linkage. That's because in the
    // hull shader there is no ordering: the output comes from the separate PCF, which does not
    // participate in the argument list. That is always put at the end of the HS linkage, so the
    // input side of the DS must match. The argument may be in any position in the DS argument list
    // however, so this ensures the linkage is built in the correct order regardless of argument order.
    if (language == EShLangTessEvaluation) {
        for (auto it = inputs.begin(); it != inputs.end(); ++it)
            if (isDsPcfInput((*it)->getType()))
                makeVariableInOut(*(*it));
    }

    // Add uniform parameters to the $Global uniform block.
    TVector<TVariable*> opaque_uniforms;
    for (int i = 0; i < userFunction.getParamCount(); i++) {
        TType& paramType = *userFunction[i].type;
        TString& paramName = *userFunction[i].name;
        if (paramType.getQualifier().storage == EvqUniform) {
            if (!paramType.containsOpaque()) {
                // Add it to the global uniform block.
                growGlobalUniformBlock(loc, paramType, paramName);
            } else {
                // Declare it as a separate variable.
                TVariable* var = makeInternalVariable(paramName.c_str(), paramType);
                opaque_uniforms.push_back(var);
            }
        }
    }

    // Synthesize the call

    pushScope(); // matches the one in handleFunctionBody()

    // new signature
    TType voidType(EbtVoid);
    TFunction synthEntryPoint(&userFunction.getName(), voidType);
    TIntermAggregate* synthParams = new TIntermAggregate();
    intermediate.setAggregateOperator(synthParams, EOpParameters, voidType, loc);
    intermediate.setEntryPointMangledName(synthEntryPoint.getMangledName().c_str());
    intermediate.incrementEntryPointCount();
    TFunction callee(&userFunction.getName(), voidType); // call based on old name, which is still in the symbol table

    // change original name
    userFunction.addPrefix("@"); // change the name in the function, but not in the symbol table

    // Copy inputs (shader-in -> calling arg), while building up the call node
    TVector<TVariable*> argVars;
    TIntermAggregate* synthBody = new TIntermAggregate();
    auto inputIt = inputs.begin();
    auto opaqueUniformIt = opaque_uniforms.begin();
    TIntermTyped* callingArgs = nullptr;

    for (int i = 0; i < userFunction.getParamCount(); i++) {
        TParameter& param = userFunction[i];
        argVars.push_back(makeInternalVariable(*param.name, *param.type));
        argVars.back()->getWritableType().getQualifier().makeTemporary();

        // Track the input patch, which is the only non-builtin supported by hull shader PCF.
        if (param.getDeclaredBuiltIn() == EbvInputPatch)
            inputPatch = argVars.back();

        TIntermSymbol* arg = intermediate.addSymbol(*argVars.back());
        handleFunctionArgument(&callee, callingArgs, arg);
        if (param.type->getQualifier().isParamInput()) {
            TIntermTyped* input = intermediate.addSymbol(**inputIt);
            if (input->getType().getQualifier().builtIn == EbvFragCoord && intermediate.getDxPositionW()) {
                // Replace FragCoord W with its reciprocal.
                auto pos_xyz = handleDotDereference(loc, input, "xyz");
                auto pos_w   = handleDotDereference(loc, input, "w");
                auto one     = intermediate.addConstantUnion(1.0, EbtFloat, loc);
                auto recip_w = intermediate.addBinaryMath(EOpDiv, one, pos_w, loc);
                TIntermAggregate* dst = new TIntermAggregate(EOpConstructVec4);
                dst->getSequence().push_back(pos_xyz);
                dst->getSequence().push_back(recip_w);
                dst->setType(TType(EbtFloat, EvqTemporary, 4));
                dst->setLoc(loc);
                input = dst;
            }
            intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign, arg, input));
            inputIt++;
        }
        if (param.type->getQualifier().storage == EvqUniform) {
            if (!param.type->containsOpaque()) {
                // Look it up in the $Global uniform block.
                intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign, arg,
                                                                   handleVariable(loc, param.name)));
            } else {
                intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign, arg,
                                                                   intermediate.addSymbol(**opaqueUniformIt)));
                ++opaqueUniformIt;
            }
        }
    }

    // Call
    currentCaller = synthEntryPoint.getMangledName();
    TIntermTyped* callReturn = handleFunctionCall(loc, &callee, callingArgs);
    currentCaller = userFunction.getMangledName();

    // Return value
    if (entryPointOutput) {
        TIntermTyped* returnAssign;

        // For hull shaders, the wrapped entry point return value is written to
        // an array element as indexed by invocation ID, which we might have to make up.
        // This is required to match SPIR-V semantics.
        if (language == EShLangTessControl) {
            TIntermSymbol* invocationIdSym = findTessLinkageSymbol(EbvInvocationId);

            // If there is no user declared invocation ID, we must make one.
            if (invocationIdSym == nullptr) {
                TType invocationIdType(EbtUint, EvqIn, 1);
                TString* invocationIdName = NewPoolTString("InvocationId");
                invocationIdType.getQualifier().builtIn = EbvInvocationId;

                TVariable* variable = makeInternalVariable(*invocationIdName, invocationIdType);

                globalQualifierFix(loc, variable->getWritableType().getQualifier());
                trackLinkage(*variable);

                invocationIdSym = intermediate.addSymbol(*variable);
            }

            TIntermTyped* element = intermediate.addIndex(EOpIndexIndirect, intermediate.addSymbol(*entryPointOutput),
                                                          invocationIdSym, loc);

            // Set the type of the array element being dereferenced
            const TType derefElementType(entryPointOutput->getType(), 0);
            element->setType(derefElementType);

            returnAssign = handleAssign(loc, EOpAssign, element, callReturn);
        } else {
            returnAssign = handleAssign(loc, EOpAssign, intermediate.addSymbol(*entryPointOutput), callReturn);
        }
        intermediate.growAggregate(synthBody, returnAssign);
    } else
        intermediate.growAggregate(synthBody, callReturn);

    // Output copies
    auto outputIt = outputs.begin();
    for (int i = 0; i < userFunction.getParamCount(); i++) {
        TParameter& param = userFunction[i];

        // GS outputs are via emit, so we do not copy them here.
        if (param.type->getQualifier().isParamOutput()) {
            if (param.getDeclaredBuiltIn() == EbvGsOutputStream) {
                // GS output stream does not assign outputs here: it's the Append() method
                // which writes to the output, probably multiple times separated by Emit.
                // We merely remember the output to use, here.
                gsStreamOutput = *outputIt;
            } else {
                intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign,
                                                                   intermediate.addSymbol(**outputIt),
                                                                   intermediate.addSymbol(*argVars[i])));
            }

            outputIt++;
        }
    }

    // Put the pieces together to form a full function subtree
    // for the synthesized entry point.
    synthBody->setOperator(EOpSequence);
    TIntermNode* synthFunctionDef = synthParams;
    handleFunctionBody(loc, synthEntryPoint, synthBody, synthFunctionDef);

    entryPointFunctionBody = synthBody;

    return synthFunctionDef;
}
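
// Wrap up a function body: graft it onto the parameter aggregate to form the
// full EOpFunction subtree, pop the function's scope, and verify that a
// non-void function actually returned a value.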
void HlslParseContext::handleFunctionBody(const TSourceLoc& loc, TFunction& function, TIntermNode* functionBody,
                                          TIntermNode*& node)
{
    node = intermediate.growAggregate(node, functionBody);
    intermediate.setAggregateOperator(node, EOpFunction, function.getType(), loc);
    node->getAsAggregate()->setName(function.getMangledName().c_str());

    popScope();
    if (function.hasImplicitThis())
        popImplicitThis();

    if (function.getType().getBasicType() != EbtVoid && ! functionReturnsValue)
        error(loc, "function does not return a value:", "", function.getName().c_str());
}

// AST I/O is done through shader globals declared in the 'in' or 'out'
// storage class. An HLSL entry point has a return value, input parameters,
// and output parameters. These need to get remapped to the AST I/O.
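//
// For example (illustrative): for an entry point declared as
//     float4 main(float4 pos : POSITION) : SV_Position
// the 'pos' parameter becomes a shader input global, and the return value
// becomes an output global named "@entryPointOutput".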
void HlslParseContext::remapEntryPointIO(TFunction& function, TVariable*& returnValue,
                                         TVector<TVariable*>& inputs, TVector<TVariable*>& outputs)
{
    // We might have an input structure type with no decorations that caused it
    // to look like an input type, yet it has (e.g.) interpolation qualifiers that
    // must be modified to make it usable as an input type.
    // Hence, a missing ioTypeMap entry for 'input' might need to be synthesized.
    const auto synthesizeEditedInput = [this](TType& type) {
        // True if a type needs to be 'flat'
        const auto needsFlat = [](const TType& type) {
            return type.containsBasicType(EbtInt) ||
                   type.containsBasicType(EbtUint) ||
                   type.containsBasicType(EbtInt64) ||
                   type.containsBasicType(EbtUint64) ||
                   type.containsBasicType(EbtBool) ||
                   type.containsBasicType(EbtDouble);
        };

        if (language == EShLangFragment && needsFlat(type)) {
            if (type.isStruct()) {
                TTypeList* finalList = nullptr;
                auto it = ioTypeMap.find(type.getStruct());
                if (it == ioTypeMap.end() || it->second.input == nullptr) {
                    // Getting here means we have no input struct, but we need one.
                    auto list = new TTypeList;
                    for (auto member = type.getStruct()->begin(); member != type.getStruct()->end(); ++member) {
                        TType* newType = new TType;
                        newType->shallowCopy(*member->type);
                        TTypeLoc typeLoc = { newType, member->loc };
                        list->push_back(typeLoc);
                    }
                    // install the new input type
                    if (it == ioTypeMap.end()) {
                        tIoKinds newLists = { list, nullptr, nullptr };
                        ioTypeMap[type.getStruct()] = newLists;
                    } else
                        it->second.input = list;
                    finalList = list;
                } else
                    finalList = it->second.input;
                // edit for 'flat'
                for (auto member = finalList->begin(); member != finalList->end(); ++member) {
                    if (needsFlat(*member->type)) {
                        member->type->getQualifier().clearInterpolation();
                        member->type->getQualifier().flat = true;
                    }
                }
            } else {
                type.getQualifier().clearInterpolation();
                type.getQualifier().flat = true;
            }
        }
    };

    // Do the actual work to make a type be a shader input or output variable,
    // and clear the original to be non-IO (for use as a normal function parameter/return).
    const auto makeIoVariable = [this](const char* name, TType& type, TStorageQualifier storage) -> TVariable* {
        TVariable* ioVariable = makeInternalVariable(name, type);
        clearUniformInputOutput(type.getQualifier());
        if (type.isStruct()) {
            auto newLists = ioTypeMap.find(ioVariable->getType().getStruct());
            if (newLists != ioTypeMap.end()) {
                if (storage == EvqVaryingIn && newLists->second.input)
                    ioVariable->getWritableType().setStruct(newLists->second.input);
                else if (storage == EvqVaryingOut && newLists->second.output)
                    ioVariable->getWritableType().setStruct(newLists->second.output);
            }
        }
        if (storage == EvqVaryingIn) {
            correctInput(ioVariable->getWritableType().getQualifier());
            if (language == EShLangTessEvaluation)
                if (!ioVariable->getType().isArray())
                    ioVariable->getWritableType().getQualifier().patch = true;
        } else {
            correctOutput(ioVariable->getWritableType().getQualifier());
        }
        ioVariable->getWritableType().getQualifier().storage = storage;

        fixBuiltInIoType(ioVariable->getWritableType());

        return ioVariable;
    };
    // return value is actually a shader-scoped output (out)
    if (function.getType().getBasicType() == EbtVoid) {
        returnValue = nullptr;
    } else {
        if (language == EShLangTessControl) {
            // A hull (tessellation control) shader in HLSL returns a per-control-point value,
            // but it needs to be an array in SPIR-V semantics. We'll write to it indexed by
            // invocation ID.

            clearUniformInputOutput(function.getWritableType().getQualifier());

            TType outputType;
            outputType.shallowCopy(function.getType());

            // vertices has necessarily already been set when handling entry point attributes.
            TArraySizes* arraySizes = new TArraySizes;
            arraySizes->addInnerSize(intermediate.getVertices());
            outputType.transferArraySizes(arraySizes);

            returnValue = makeIoVariable("@entryPointOutput", outputType, EvqVaryingOut);
        } else {
            returnValue = makeIoVariable("@entryPointOutput", function.getWritableType(), EvqVaryingOut);
        }
    }

    // parameters are actually shader-scoped inputs and outputs (in or out)
    for (int i = 0; i < function.getParamCount(); i++) {
        TType& paramType = *function[i].type;
        if (paramType.getQualifier().isParamInput()) {
            synthesizeEditedInput(paramType);
            TVariable* argAsGlobal = makeIoVariable(function[i].name->c_str(), paramType, EvqVaryingIn);
            inputs.push_back(argAsGlobal);
        }
        if (paramType.getQualifier().isParamOutput()) {
            TVariable* argAsGlobal = makeIoVariable(function[i].name->c_str(), paramType, EvqVaryingOut);
            outputs.push_back(argAsGlobal);
        }
    }
}

// An HLSL function that looks like an entry point, but is not,
// declares entry point I/O built-ins, but these have to be undone.
void HlslParseContext::remapNonEntryPointIO(TFunction& function)
{
    // return value
    if (function.getType().getBasicType() != EbtVoid)
        clearUniformInputOutput(function.getWritableType().getQualifier());

    // parameters.
    // References to structuredbuffer types are left unmodified.
    for (int i = 0; i < function.getParamCount(); i++)
        if (!isReference(*function[i].type))
            clearUniformInputOutput(function[i].type->getQualifier());
}
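
// Handle a declaration statement: wrap the declared object in an EOpDeclare
// unary node (of void type) so downstream traversal sees an explicit declaration.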
TIntermNode* HlslParseContext::handleDeclare(const TSourceLoc& loc, TIntermTyped* var)
{
    return intermediate.addUnaryNode(EOpDeclare, var, loc, TType(EbtVoid));
}

// Handle function returns, including type conversions to the function return type
// if necessary.
TIntermNode* HlslParseContext::handleReturnValue(const TSourceLoc& loc, TIntermTyped* value)
{
    functionReturnsValue = true;

    if (currentFunctionType->getBasicType() == EbtVoid) {
        error(loc, "void function cannot return a value", "return", "");
        return intermediate.addBranch(EOpReturn, loc);
    } else if (*currentFunctionType != value->getType()) {
        value = intermediate.addConversion(EOpReturn, *currentFunctionType, value);
        if (value && *currentFunctionType != value->getType())
            value = intermediate.addUniShapeConversion(EOpReturn, *currentFunctionType, value);
        if (value == nullptr || *currentFunctionType != value->getType()) {
            error(loc, "type does not match, or is not convertible to, the function's return type", "return", "");
            return value;
        }
    }

    return intermediate.addBranch(EOpReturn, value, loc);
}

void HlslParseContext::handleFunctionArgument(TFunction* function,
                                              TIntermTyped*& arguments, TIntermTyped* newArg)
{
    TParameter param = { nullptr, new TType, nullptr };
    param.type->shallowCopy(newArg->getType());

    function->addParameter(param);
    if (arguments)
        arguments = intermediate.growAggregate(arguments, newArg);
    else
        arguments = newArg;
}

// FragCoord may require special loading: we can optionally reciprocate W.
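//
// For example (illustrative), when reciprocal W is requested, 'left = right'
// is expanded into the sequence:
//     @fragcoord   = right;
//     @fragcoord.w = 1.0 / @fragcoord.w;
//     left         = @fragcoord;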
TIntermTyped* HlslParseContext::assignFromFragCoord(const TSourceLoc& loc, TOperator op,
                                                    TIntermTyped* left, TIntermTyped* right)
{
    // If we are not asked for reciprocal W, use a plain old assign.
    if (!intermediate.getDxPositionW())
        return intermediate.addAssign(op, left, right, loc);

    // If we get here, we should reciprocate W.
    TIntermAggregate* assignList = nullptr;

    // If this is a complex rvalue, we don't want to dereference it many times. Create a temporary.
    TVariable* rhsTempVar = makeInternalVariable("@fragcoord", right->getType());
    rhsTempVar->getWritableType().getQualifier().makeTemporary();

    {
        TIntermTyped* rhsTempSym = intermediate.addSymbol(*rhsTempVar, loc);
        assignList = intermediate.growAggregate(assignList,
                                                intermediate.addAssign(EOpAssign, rhsTempSym, right, loc), loc);
    }

    // tmp.w = 1.0 / tmp.w
    {
        const int W = 3;

        TIntermTyped* tempSymL = intermediate.addSymbol(*rhsTempVar, loc);
        TIntermTyped* tempSymR = intermediate.addSymbol(*rhsTempVar, loc);
        TIntermTyped* index = intermediate.addConstantUnion(W, loc);

        TIntermTyped* lhsElement = intermediate.addIndex(EOpIndexDirect, tempSymL, index, loc);
        TIntermTyped* rhsElement = intermediate.addIndex(EOpIndexDirect, tempSymR, index, loc);

        const TType derefType(right->getType(), 0);

        lhsElement->setType(derefType);
        rhsElement->setType(derefType);

        auto one = intermediate.addConstantUnion(1.0, EbtFloat, loc);
        auto recip_w = intermediate.addBinaryMath(EOpDiv, one, rhsElement, loc);

        assignList = intermediate.growAggregate(assignList, intermediate.addAssign(EOpAssign, lhsElement, recip_w, loc));
    }

    // Assign the rhs temp (now with W reciprocal) to the final output
    {
        TIntermTyped* rhsTempSym = intermediate.addSymbol(*rhsTempVar, loc);
        assignList = intermediate.growAggregate(assignList, intermediate.addAssign(op, left, rhsTempSym, loc));
    }

    assert(assignList != nullptr);
    assignList->setOperator(EOpSequence);

    return assignList;
}

// Position may require special handling: we can optionally invert Y.
// See: https://github.com/KhronosGroup/glslang/issues/1173
//      https://github.com/KhronosGroup/glslang/issues/494
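//
// For example (illustrative), with Y inversion requested, 'left = right'
// is expanded into the sequence:
//     @position   = right;
//     @position.y = -@position.y;
//     left        = @position;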
TIntermTyped* HlslParseContext::assignPosition(const TSourceLoc& loc, TOperator op,
                                               TIntermTyped* left, TIntermTyped* right)
{
    // If we are not asked for Y inversion, use a plain old assign.
    if (!intermediate.getInvertY())
        return intermediate.addAssign(op, left, right, loc);

    // If we get here, we should invert Y.
    TIntermAggregate* assignList = nullptr;

    // If this is a complex rvalue, we don't want to dereference it many times. Create a temporary.
    TVariable* rhsTempVar = makeInternalVariable("@position", right->getType());
    rhsTempVar->getWritableType().getQualifier().makeTemporary();

    {
        TIntermTyped* rhsTempSym = intermediate.addSymbol(*rhsTempVar, loc);
        assignList = intermediate.growAggregate(assignList,
                                                intermediate.addAssign(EOpAssign, rhsTempSym, right, loc), loc);
    }

    // pos.y = -pos.y
    {
        const int Y = 1;

        TIntermTyped* tempSymL = intermediate.addSymbol(*rhsTempVar, loc);
        TIntermTyped* tempSymR = intermediate.addSymbol(*rhsTempVar, loc);
        TIntermTyped* index = intermediate.addConstantUnion(Y, loc);

        TIntermTyped* lhsElement = intermediate.addIndex(EOpIndexDirect, tempSymL, index, loc);
        TIntermTyped* rhsElement = intermediate.addIndex(EOpIndexDirect, tempSymR, index, loc);

        const TType derefType(right->getType(), 0);

        lhsElement->setType(derefType);
        rhsElement->setType(derefType);

        TIntermTyped* yNeg = intermediate.addUnaryMath(EOpNegative, rhsElement, loc);

        assignList = intermediate.growAggregate(assignList, intermediate.addAssign(EOpAssign, lhsElement, yNeg, loc));
    }

    // Assign the rhs temp (now with Y inversion) to the final output
    {
        TIntermTyped* rhsTempSym = intermediate.addSymbol(*rhsTempVar, loc);
        assignList = intermediate.growAggregate(assignList, intermediate.addAssign(op, left, rhsTempSym, loc));
    }

    assert(assignList != nullptr);
    assignList->setOperator(EOpSequence);

    return assignList;
}

// Clip and cull distance require special handling due to a semantic mismatch. In HLSL,
// these can be float scalar, float vector, or arrays of float scalar or float vector.
// In SPIR-V, they are arrays of scalar floats in all cases. We must copy individual components
// (e.g., both x and y components of a float2) out into the destination float array.
//
// The values are assigned to sequential members of the output array. The inner dimension
// is vector components. The outer dimension is array elements.
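//
// For example (illustrative): two outputs declared as
//     float2 clip0 : SV_ClipDistance0;
//     float  clip1 : SV_ClipDistance1;
// pack into a single 'float ClipDistance[3]', with clip0.x, clip0.y, and clip1
// landing in elements 0, 1, and 2, respectively.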
assignClipCullDistance(const TSourceLoc & loc,TOperator op,int semanticId,TIntermTyped * left,TIntermTyped * right)2608 TIntermAggregate* HlslParseContext::assignClipCullDistance(const TSourceLoc& loc, TOperator op, int semanticId,
2609 TIntermTyped* left, TIntermTyped* right)
2610 {
2611 switch (language) {
2612 case EShLangFragment:
2613 case EShLangVertex:
2614 case EShLangGeometry:
2615 break;
2616 default:
2617 error(loc, "unimplemented: clip/cull not currently implemented for this stage", "", "");
2618 return nullptr;
2619 }
2620
2621 TVariable** clipCullVar = nullptr;
2622
2623 // Figure out if we are assigning to, or from, clip or cull distance.
2624 const bool isOutput = isClipOrCullDistance(left->getType());
2625
2626 // This is the rvalue or lvalue holding the clip or cull distance.
2627 TIntermTyped* clipCullNode = isOutput ? left : right;
2628 // This is the value going into or out of the clip or cull distance.
2629 TIntermTyped* internalNode = isOutput ? right : left;
2630
2631 const TBuiltInVariable builtInType = clipCullNode->getQualifier().builtIn;
2632
2633 decltype(clipSemanticNSizeIn)* semanticNSize = nullptr;
2634
2635 // Refer to either the clip or the cull distance, depending on semantic.
2636 switch (builtInType) {
2637 case EbvClipDistance:
2638 clipCullVar = isOutput ? &clipDistanceOutput : &clipDistanceInput;
2639 semanticNSize = isOutput ? &clipSemanticNSizeOut : &clipSemanticNSizeIn;
2640 break;
2641 case EbvCullDistance:
2642 clipCullVar = isOutput ? &cullDistanceOutput : &cullDistanceInput;
2643 semanticNSize = isOutput ? &cullSemanticNSizeOut : &cullSemanticNSizeIn;
2644 break;
2645
2646 // called invalidly: we expected a clip or a cull distance.
2647 // static compile time problem: should not happen.
2648 default: assert(0); return nullptr;
2649 }
2650
2651 // This is the offset in the destination array of a given semantic's data
2652 std::array<int, maxClipCullRegs> semanticOffset;
2653
2654 // Calculate offset of variable of semantic N in destination array
2655 int arrayLoc = 0;
2656 int vecItems = 0;
2657
2658 for (int x = 0; x < maxClipCullRegs; ++x) {
2659 // See if we overflowed the vec4 packing
2660 if ((vecItems + (*semanticNSize)[x]) > 4) {
2661 arrayLoc = (arrayLoc + 3) & (~0x3); // round up to next multiple of 4
2662 vecItems = 0;
2663 }
2664
2665 semanticOffset[x] = arrayLoc;
2666 vecItems += (*semanticNSize)[x];
2667 arrayLoc += (*semanticNSize)[x];
2668 }
2669
2670
2671 // It can have up to 2 array dimensions (in the case of geometry shader inputs)
2672 const TArraySizes* const internalArraySizes = internalNode->getType().getArraySizes();
2673 const int internalArrayDims = internalNode->getType().isArray() ? internalArraySizes->getNumDims() : 0;
2674 // vector sizes:
2675 const int internalVectorSize = internalNode->getType().getVectorSize();
2676 // array sizes, or 1 if it's not an array:
2677 const int internalInnerArraySize = (internalArrayDims > 0 ? internalArraySizes->getDimSize(internalArrayDims-1) : 1);
2678 const int internalOuterArraySize = (internalArrayDims > 1 ? internalArraySizes->getDimSize(0) : 1);
2679
2680 // The created type may be an array of arrays, e.g, for geometry shader inputs.
2681 const bool isImplicitlyArrayed = (language == EShLangGeometry && !isOutput);
2682
2683 // If we haven't created the output already, create it now.
2684 if (*clipCullVar == nullptr) {
2685 // ClipDistance and CullDistance are handled specially in the entry point input/output copy
2686 // algorithm, because they may need to be unpacked from components of vectors (or a scalar)
2687 // into a float array, or vice versa. Here, we make the array the right size and type,
2688 // which depends on the incoming data, which has several potential dimensions:
2689 // * Semantic ID
2690 // * vector size
2691 // * array size
2692 // Of those, semantic ID and array size cannot appear simultaneously.
2693 //
2694 // Also to note: for implicitly arrayed forms (e.g, geometry shader inputs), we need to create two
2695 // array dimensions. The shader's declaration may have one or two array dimensions. One is always
2696 // the geometry's dimension.
2697
2698 const bool useInnerSize = internalArrayDims > 1 || !isImplicitlyArrayed;
2699
2700 const int requiredInnerArraySize = arrayLoc * (useInnerSize ? internalInnerArraySize : 1);
2701 const int requiredOuterArraySize = (internalArrayDims > 0) ? internalArraySizes->getDimSize(0) : 1;
2702
2703 TType clipCullType(EbtFloat, clipCullNode->getType().getQualifier().storage, 1);
2704 clipCullType.getQualifier() = clipCullNode->getType().getQualifier();
2705
2706 // Create required array dimension
2707 TArraySizes* arraySizes = new TArraySizes;
2708 if (isImplicitlyArrayed)
2709 arraySizes->addInnerSize(requiredOuterArraySize);
2710 arraySizes->addInnerSize(requiredInnerArraySize);
2711 clipCullType.transferArraySizes(arraySizes);
2712
2713 // Obtain symbol name: we'll use that for the symbol we introduce.
2714 TIntermSymbol* sym = clipCullNode->getAsSymbolNode();
2715 assert(sym != nullptr);
2716
2717 // We are moving the semantic ID from the layout location, so it is no longer needed or
2718 // desired there.
2719 clipCullType.getQualifier().layoutLocation = TQualifier::layoutLocationEnd;
2720
2721 // Create variable and track its linkage
2722 *clipCullVar = makeInternalVariable(sym->getName().c_str(), clipCullType);
2723
2724 trackLinkage(**clipCullVar);
2725 }
2726
2727 // Create symbol for the clip or cull variable.
2728 TIntermSymbol* clipCullSym = intermediate.addSymbol(**clipCullVar);
2729
2730 // vector sizes:
2731 const int clipCullVectorSize = clipCullSym->getType().getVectorSize();
2732
2733 // array sizes, or 1 if it's not an array:
2734 const TArraySizes* const clipCullArraySizes = clipCullSym->getType().getArraySizes();
2735 const int clipCullOuterArraySize = isImplicitlyArrayed ? clipCullArraySizes->getDimSize(0) : 1;
2736 const int clipCullInnerArraySize = clipCullArraySizes->getDimSize(isImplicitlyArrayed ? 1 : 0);
2737
2738 // clipCullSym has got to be an array of scalar floats, per SPIR-V semantics.
2739 // fixBuiltInIoType() should have handled that upstream.
2740 assert(clipCullSym->getType().isArray());
2741 assert(clipCullSym->getType().getVectorSize() == 1);
2742 assert(clipCullSym->getType().getBasicType() == EbtFloat);
2743
2744 // We may be creating multiple sub-assignments. This is an aggregate to hold them.
2745 // TODO: it would be possible to be clever sometimes and avoid the sequence node if not needed.
2746 TIntermAggregate* assignList = nullptr;
2747
2748 // Holds individual component assignments as we make them.
2749 TIntermTyped* clipCullAssign = nullptr;
2750
2751 // If the types are homomorphic, use a simple assign. No need to mess about with
2752 // individual components.
    if (clipCullSym->getType().isArray() == internalNode->getType().isArray() &&
        clipCullInnerArraySize == internalInnerArraySize &&
        clipCullOuterArraySize == internalOuterArraySize &&
        clipCullVectorSize == internalVectorSize) {

        if (isOutput)
            clipCullAssign = intermediate.addAssign(op, clipCullSym, internalNode, loc);
        else
            clipCullAssign = intermediate.addAssign(op, internalNode, clipCullSym, loc);

        assignList = intermediate.growAggregate(assignList, clipCullAssign);
        assignList->setOperator(EOpSequence);

        return assignList;
    }

    // We are going to copy each component of the internal (per array element if indicated) to sequential
    // array elements of the clipCullSym. This tracks the lhs element we're writing to as we go along.
    // We may be starting in the middle - e.g., for a non-zero semantic ID calculated above.
    int clipCullInnerArrayPos = semanticOffset[semanticId];
    int clipCullOuterArrayPos = 0;

    // Lambda to add an index to a node, set the type of the result, and return the new node.
    const auto addIndex = [this, &loc](TIntermTyped* node, int pos) -> TIntermTyped* {
        const TType derefType(node->getType(), 0);
        node = intermediate.addIndex(EOpIndexDirect, node, intermediate.addConstantUnion(pos, loc), loc);
        node->setType(derefType);
        return node;
    };

    // Loop through every component of every element of the internal, and copy to or from the matching external.
    for (int internalOuterArrayPos = 0; internalOuterArrayPos < internalOuterArraySize; ++internalOuterArrayPos) {
        for (int internalInnerArrayPos = 0; internalInnerArrayPos < internalInnerArraySize; ++internalInnerArrayPos) {
            for (int internalComponent = 0; internalComponent < internalVectorSize; ++internalComponent) {
                // clip/cull array member to read from / write to:
                TIntermTyped* clipCullMember = clipCullSym;

                // If implicitly arrayed, there is an outer array dimension involved
                if (isImplicitlyArrayed)
                    clipCullMember = addIndex(clipCullMember, clipCullOuterArrayPos);

                // Index into proper array position for clip cull member
                clipCullMember = addIndex(clipCullMember, clipCullInnerArrayPos++);

                // if needed, start over with next outer array slice.
                if (isImplicitlyArrayed && clipCullInnerArrayPos >= clipCullInnerArraySize) {
                    clipCullInnerArrayPos = semanticOffset[semanticId];
                    ++clipCullOuterArrayPos;
                }

                // internal member to read from / write to:
                TIntermTyped* internalMember = internalNode;

                // If internal node has outer array dimension, index appropriately.
                if (internalArrayDims > 1)
                    internalMember = addIndex(internalMember, internalOuterArrayPos);

                // If internal node has inner array dimension, index appropriately.
                if (internalArrayDims > 0)
                    internalMember = addIndex(internalMember, internalInnerArrayPos);

                // If internal node is a vector, extract the component of interest.
                if (internalNode->getType().isVector())
                    internalMember = addIndex(internalMember, internalComponent);

                // Create an assignment: output from internal to clip cull, or input from clip cull to internal.
                if (isOutput)
                    clipCullAssign = intermediate.addAssign(op, clipCullMember, internalMember, loc);
                else
                    clipCullAssign = intermediate.addAssign(op, internalMember, clipCullMember, loc);

                // Track assignment in the sequence.
                assignList = intermediate.growAggregate(assignList, clipCullAssign);
            }
        }
    }

    assert(assignList != nullptr);
    assignList->setOperator(EOpSequence);

    return assignList;
}

// Some simple source assignments need to be flattened to a sequence
// of AST assignments. Catch these and flatten; otherwise, pass through
// to intermediate.addAssign().
//
// Also, assignment to matrix swizzles requires multiple component assignments,
// so intercept those as well.
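// For example (a conceptual sketch; the internal names differ): if the LHS is a
// flattened struct "struct S { float a; int b; } s", then
//     s = rhs;
// becomes an EOpSequence roughly like
//     s.a-part = rhs.a;
//     s.b-part = rhs.b;
// where each part is the standalone variable the member was flattened to.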
TIntermTyped* HlslParseContext::handleAssign(const TSourceLoc& loc, TOperator op, TIntermTyped* left,
                                             TIntermTyped* right)
{
    if (left == nullptr || right == nullptr)
        return nullptr;

    // writing to opaques will require fixing transforms
    if (left->getType().containsOpaque())
        intermediate.setNeedsLegalization();

    if (left->getAsOperator() && left->getAsOperator()->getOp() == EOpMatrixSwizzle)
        return handleAssignToMatrixSwizzle(loc, op, left, right);

    // Return true if the given node is an index operation into a split variable.
    const auto indexesSplit = [this](const TIntermTyped* node) -> bool {
        const TIntermBinary* binaryNode = node->getAsBinaryNode();

        if (binaryNode == nullptr)
            return false;

        return (binaryNode->getOp() == EOpIndexDirect || binaryNode->getOp() == EOpIndexIndirect) &&
               wasSplit(binaryNode->getLeft());
    };

    // Return symbol if node is symbol or index ref
    const auto getSymbol = [](const TIntermTyped* node) -> const TIntermSymbol* {
        const TIntermSymbol* symbolNode = node->getAsSymbolNode();
        if (symbolNode != nullptr)
            return symbolNode;

        const TIntermBinary* binaryNode = node->getAsBinaryNode();
        if (binaryNode != nullptr && (binaryNode->getOp() == EOpIndexDirect || binaryNode->getOp() == EOpIndexIndirect))
            return binaryNode->getLeft()->getAsSymbolNode();

        return nullptr;
    };

    // Return true if this stage assigns clip position with potentially inverted Y
    const auto assignsClipPos = [this](const TIntermTyped* node) -> bool {
        return node->getType().getQualifier().builtIn == EbvPosition &&
               (language == EShLangVertex || language == EShLangGeometry || language == EShLangTessEvaluation);
    };

    const TIntermSymbol* leftSymbol = getSymbol(left);
    const TIntermSymbol* rightSymbol = getSymbol(right);

    const bool isSplitLeft  = wasSplit(left) || indexesSplit(left);
    const bool isSplitRight = wasSplit(right) || indexesSplit(right);

    const bool isFlattenLeft  = wasFlattened(leftSymbol);
    const bool isFlattenRight = wasFlattened(rightSymbol);

    // OK to do a single assign if neither side is split or flattened. Otherwise,
    // fall through to a member-wise copy.
    if (!isFlattenLeft && !isFlattenRight && !isSplitLeft && !isSplitRight) {
        // Clip and cull distance requires more processing. See comment above assignClipCullDistance.
        if (isClipOrCullDistance(left->getType()) || isClipOrCullDistance(right->getType())) {
            const bool isOutput = isClipOrCullDistance(left->getType());

            const int semanticId = (isOutput ? left : right)->getType().getQualifier().layoutLocation;
            return assignClipCullDistance(loc, op, semanticId, left, right);
        } else if (assignsClipPos(left)) {
            // Position can require special handling: see comment above assignPosition
            return assignPosition(loc, op, left, right);
        } else if (left->getQualifier().builtIn == EbvSampleMask) {
            // Certain builtins are required to be arrayed outputs in SPIR-V, but may internally be scalars
            // in the shader. Copy the scalar RHS into the LHS array element zero, if that happens.
            if (left->isArray() && !right->isArray()) {
                const TType derefType(left->getType(), 0);
                left = intermediate.addIndex(EOpIndexDirect, left, intermediate.addConstantUnion(0, loc), loc);
                left->setType(derefType);
                // Fall through to add assign.
            }
        }

        return intermediate.addAssign(op, left, right, loc);
    }

    TIntermAggregate* assignList = nullptr;
    const TVector<TVariable*>* leftVariables = nullptr;
    const TVector<TVariable*>* rightVariables = nullptr;

    // A temporary to store the right node's value, so we don't keep indirecting into it
    // if it's not a simple symbol.
    TVariable* rhsTempVar = nullptr;

    // If the RHS is a simple symbol node, we'll copy it for each member.
    TIntermSymbol* cloneSymNode = nullptr;

    int memberCount = 0;

    // Track how many items there are to copy.
    if (left->getType().isStruct())
        memberCount = (int)left->getType().getStruct()->size();
    if (left->getType().isArray())
        memberCount = left->getType().getCumulativeArraySize();

    if (isFlattenLeft)
        leftVariables = &flattenMap.find(leftSymbol->getId())->second.members;

    if (isFlattenRight) {
        rightVariables = &flattenMap.find(rightSymbol->getId())->second.members;
    } else {
        // The RHS is not flattened. There are several cases:
        // 1. 1 item to copy: Use the RHS directly.
        // 2. >1 item, simple symbol RHS: we'll create a new TIntermSymbol node for each, but no assign to temp.
        // 3. >1 item, complex RHS: assign it to a new temp variable, and create a TIntermSymbol for each member.

        if (memberCount <= 1) {
            // case 1: we'll use the symbol directly below. Nothing to do.
        } else {
            if (right->getAsSymbolNode() != nullptr) {
                // case 2: we'll copy the symbol per iteration below.
                cloneSymNode = right->getAsSymbolNode();
            } else {
                // case 3: assign to a temp, and indirect into that.
                rhsTempVar = makeInternalVariable("flattenTemp", right->getType());
                rhsTempVar->getWritableType().getQualifier().makeTemporary();
                TIntermTyped* noFlattenRHS = intermediate.addSymbol(*rhsTempVar, loc);

                // Add this to the aggregate being built.
                assignList = intermediate.growAggregate(assignList,
                                                        intermediate.addAssign(op, noFlattenRHS, right, loc), loc);
            }
        }
    }

    // When dealing with split arrayed structures of built-ins, the arrayness is moved to the extracted built-in
    // variables, which is awkward when copying between split and unsplit structures. This variable tracks
    // array indirections so they can be percolated from outer structs to inner variables.
    std::vector<int> arrayElement;

    TStorageQualifier leftStorage = left->getType().getQualifier().storage;
    TStorageQualifier rightStorage = right->getType().getQualifier().storage;

    int leftOffsetStart = findSubtreeOffset(*left);
    int rightOffsetStart = findSubtreeOffset(*right);
    int leftOffset = leftOffsetStart;
    int rightOffset = rightOffsetStart;

    const auto getMember = [&](bool isLeft, const TType& type, int member, TIntermTyped* splitNode, int splitMember,
                               bool flattened) -> TIntermTyped* {
        const bool split = isLeft ? isSplitLeft : isSplitRight;

        TIntermTyped* subTree;
        const TType derefType(type, member);
        const TVariable* builtInVar = nullptr;
        if ((flattened || split) && derefType.isBuiltIn()) {
            auto splitPair = splitBuiltIns.find(HlslParseContext::tInterstageIoData(
                                                    derefType.getQualifier().builtIn,
                                                    isLeft ? leftStorage : rightStorage));
            if (splitPair != splitBuiltIns.end())
                builtInVar = splitPair->second;
        }
        if (builtInVar != nullptr) {
            // copy from interstage IO built-in if needed
            subTree = intermediate.addSymbol(*builtInVar);

            if (subTree->getType().isArray()) {
                // Arrayness of builtIn symbols isn't handled by the normal recursion:
                // it's been extracted and moved to the built-in.
                if (!arrayElement.empty()) {
                    const TType splitDerefType(subTree->getType(), arrayElement.back());
                    subTree = intermediate.addIndex(EOpIndexDirect, subTree,
                                                    intermediate.addConstantUnion(arrayElement.back(), loc), loc);
                    subTree->setType(splitDerefType);
                } else if (splitNode->getAsOperator() != nullptr &&
                           (splitNode->getAsOperator()->getOp() == EOpIndexIndirect)) {
                    // This might also be a stage with arrayed outputs, in which case there's an index
                    // operation we should transfer to the output builtin.

                    const TType splitDerefType(subTree->getType(), 0);
                    subTree = intermediate.addIndex(splitNode->getAsOperator()->getOp(), subTree,
                                                    splitNode->getAsBinaryNode()->getRight(), loc);
                    subTree->setType(splitDerefType);
                }
            }
        } else if (flattened && !shouldFlatten(derefType, isLeft ? leftStorage : rightStorage, false)) {
            if (isLeft) {
                // offset will cycle through variables for arrayed io
                if (leftOffset >= static_cast<int>(leftVariables->size()))
                    leftOffset = leftOffsetStart;
                subTree = intermediate.addSymbol(*(*leftVariables)[leftOffset++]);
            } else {
                // offset will cycle through variables for arrayed io
                if (rightOffset >= static_cast<int>(rightVariables->size()))
                    rightOffset = rightOffsetStart;
                subTree = intermediate.addSymbol(*(*rightVariables)[rightOffset++]);
            }

            // arrayed io
            if (subTree->getType().isArray()) {
                if (!arrayElement.empty()) {
                    const TType derefType(subTree->getType(), arrayElement.front());
                    subTree = intermediate.addIndex(EOpIndexDirect, subTree,
                                                    intermediate.addConstantUnion(arrayElement.front(), loc), loc);
                    subTree->setType(derefType);
                } else {
                    // There's an index operation we should transfer to the output builtin.
                    assert(splitNode->getAsOperator() != nullptr &&
                           splitNode->getAsOperator()->getOp() == EOpIndexIndirect);
                    const TType splitDerefType(subTree->getType(), 0);
                    subTree = intermediate.addIndex(splitNode->getAsOperator()->getOp(), subTree,
                                                    splitNode->getAsBinaryNode()->getRight(), loc);
                    subTree->setType(splitDerefType);
                }
            }
        } else {
            // Index operator if it's an aggregate, else EOpNull
            const TOperator accessOp = type.isArray()  ? EOpIndexDirect
                                     : type.isStruct() ? EOpIndexDirectStruct
                                     : EOpNull;
            if (accessOp == EOpNull) {
                subTree = splitNode;
            } else {
                subTree = intermediate.addIndex(accessOp, splitNode, intermediate.addConstantUnion(splitMember, loc),
                                                loc);
                const TType splitDerefType(splitNode->getType(), splitMember);
                subTree->setType(splitDerefType);
            }
        }

        return subTree;
    };

    // Use the proper RHS node: a new symbol from a TVariable, a copy
    // of a TIntermSymbol node, or sometimes the right node directly.
    right = rhsTempVar != nullptr   ? intermediate.addSymbol(*rhsTempVar, loc) :
            cloneSymNode != nullptr ? intermediate.addSymbol(*cloneSymNode) :
            right;

    // Cannot use auto here, because this is recursive, and auto can't work out the type without seeing the
    // whole thing. So, we'll resort to an explicit type via std::function.
    const std::function<void(TIntermTyped* left, TIntermTyped* right, TIntermTyped* splitLeft, TIntermTyped* splitRight,
                             bool topLevel)>
    traverse = [&](TIntermTyped* left, TIntermTyped* right, TIntermTyped* splitLeft, TIntermTyped* splitRight,
                   bool topLevel) -> void {
        // If we get here, we are assigning to or from a whole array or struct that must be
        // flattened, so have to do member-by-member assignment:

        bool shouldFlattenSubsetLeft = isFlattenLeft && shouldFlatten(left->getType(), leftStorage, topLevel);
        bool shouldFlattenSubsetRight = isFlattenRight && shouldFlatten(right->getType(), rightStorage, topLevel);

        if ((left->getType().isArray() || right->getType().isArray()) &&
            (shouldFlattenSubsetLeft || isSplitLeft ||
             shouldFlattenSubsetRight || isSplitRight)) {
            const int elementsL = left->getType().isArray() ? left->getType().getOuterArraySize() : 1;
            const int elementsR = right->getType().isArray() ? right->getType().getOuterArraySize() : 1;

            // The arrays might not be the same size,
            // e.g., if the size has been forced for EbvTessLevelInner/Outer.
            const int elementsToCopy = std::min(elementsL, elementsR);

            // array case
            for (int element = 0; element < elementsToCopy; ++element) {
                arrayElement.push_back(element);

                // Add a new AST symbol node if we have a temp variable holding a complex RHS.
                TIntermTyped* subLeft  = getMember(true, left->getType(), element, left, element,
                                                   shouldFlattenSubsetLeft);
                TIntermTyped* subRight = getMember(false, right->getType(), element, right, element,
                                                   shouldFlattenSubsetRight);

                TIntermTyped* subSplitLeft  = isSplitLeft  ? getMember(true, left->getType(), element, splitLeft,
                                                                       element, shouldFlattenSubsetLeft)
                                                           : subLeft;
                TIntermTyped* subSplitRight = isSplitRight ? getMember(false, right->getType(), element, splitRight,
                                                                       element, shouldFlattenSubsetRight)
                                                           : subRight;

                traverse(subLeft, subRight, subSplitLeft, subSplitRight, false);

                arrayElement.pop_back();
            }
        } else if (left->getType().isStruct() && (shouldFlattenSubsetLeft || isSplitLeft ||
                                                  shouldFlattenSubsetRight || isSplitRight)) {
            // struct case
            const auto& membersL = *left->getType().getStruct();
            const auto& membersR = *right->getType().getStruct();

            // These track the members in the split structures corresponding to the same members
            // in the unsplit structures, which we traverse in parallel.
            int memberL = 0;
            int memberR = 0;

            // Handle empty structure assignment
            if (int(membersL.size()) == 0 && int(membersR.size()) == 0)
                assignList = intermediate.growAggregate(assignList, intermediate.addAssign(op, left, right, loc), loc);

            for (int member = 0; member < int(membersL.size()); ++member) {
                const TType& typeL = *membersL[member].type;
                const TType& typeR = *membersR[member].type;

                TIntermTyped* subLeft  = getMember(true, left->getType(), member, left, member,
                                                   shouldFlattenSubsetLeft);
                TIntermTyped* subRight = getMember(false, right->getType(), member, right, member,
                                                   shouldFlattenSubsetRight);

                // If there is no splitting, use the same values to avoid inefficiency.
                TIntermTyped* subSplitLeft  = isSplitLeft  ? getMember(true, left->getType(), member, splitLeft,
                                                                       memberL, shouldFlattenSubsetLeft)
                                                           : subLeft;
                TIntermTyped* subSplitRight = isSplitRight ? getMember(false, right->getType(), member, splitRight,
                                                                       memberR, shouldFlattenSubsetRight)
                                                           : subRight;

                if (isClipOrCullDistance(subSplitLeft->getType()) || isClipOrCullDistance(subSplitRight->getType())) {
                    // Clip and cull distance built-in assignment is complex in its own right, and is handled in
                    // a separate function dedicated to that task. See comment above assignClipCullDistance.

                    const bool isOutput = isClipOrCullDistance(subSplitLeft->getType());

                    // Since all clip/cull semantics boil down to the same built-in type, we need to get the
                    // semantic ID from the dereferenced type's layout location, to avoid an N-1 mapping.
                    const TType derefType((isOutput ? left : right)->getType(), member);
                    const int semanticId = derefType.getQualifier().layoutLocation;

                    TIntermAggregate* clipCullAssign = assignClipCullDistance(loc, op, semanticId,
                                                                              subSplitLeft, subSplitRight);

                    assignList = intermediate.growAggregate(assignList, clipCullAssign, loc);
                } else if (subSplitRight->getType().getQualifier().builtIn == EbvFragCoord) {
                    // FragCoord can require special handling: see comment above assignFromFragCoord
                    TIntermTyped* fragCoordAssign = assignFromFragCoord(loc, op, subSplitLeft, subSplitRight);
                    assignList = intermediate.growAggregate(assignList, fragCoordAssign, loc);
                } else if (assignsClipPos(subSplitLeft)) {
                    // Position can require special handling: see comment above assignPosition
                    TIntermTyped* positionAssign = assignPosition(loc, op, subSplitLeft, subSplitRight);
                    assignList = intermediate.growAggregate(assignList, positionAssign, loc);
                } else if (!shouldFlattenSubsetLeft && !shouldFlattenSubsetRight &&
                           !typeL.containsBuiltIn() && !typeR.containsBuiltIn()) {
                    // If this is the final flattening (no nested types below to flatten)
                    // we'll copy the member, else recurse into the type hierarchy.
                    // However, if splitting the struct, that means we can copy a whole
                    // subtree here IFF it does not itself contain any interstage built-in
                    // IO variables, so we only have to recurse into it if there's something
                    // for splitting to do. That can save a lot of AST verbosity for
                    // a bunch of memberwise copies.

                    assignList = intermediate.growAggregate(assignList,
                                                            intermediate.addAssign(op, subSplitLeft, subSplitRight, loc),
                                                            loc);
                } else {
                    traverse(subLeft, subRight, subSplitLeft, subSplitRight, false);
                }

                memberL += (typeL.isBuiltIn() ? 0 : 1);
                memberR += (typeR.isBuiltIn() ? 0 : 1);
            }
        } else {
            // Member copy
            assignList = intermediate.growAggregate(assignList, intermediate.addAssign(op, left, right, loc), loc);
        }

    };

    TIntermTyped* splitLeft  = left;
    TIntermTyped* splitRight = right;

    // If either left or right was a split structure, we must read or write it, but still have to
    // parallel-recurse through the unsplit structure to identify the built-in IO vars.
    // The left can be either a symbol, or an index into a symbol (e.g., array reference)
    if (isSplitLeft) {
        if (indexesSplit(left)) {
            // Index case: Refer to the indexed symbol, if the left is an index operator.
            const TIntermSymbol* symNode = left->getAsBinaryNode()->getLeft()->getAsSymbolNode();

            TIntermTyped* splitLeftNonIo = intermediate.addSymbol(*getSplitNonIoVar(symNode->getId()), loc);

            splitLeft = intermediate.addIndex(left->getAsBinaryNode()->getOp(), splitLeftNonIo,
                                              left->getAsBinaryNode()->getRight(), loc);

            const TType derefType(splitLeftNonIo->getType(), 0);
            splitLeft->setType(derefType);
        } else {
            // Symbol case: otherwise, if not indexed, we have the symbol directly.
            const TIntermSymbol* symNode = left->getAsSymbolNode();
            splitLeft = intermediate.addSymbol(*getSplitNonIoVar(symNode->getId()), loc);
        }
    }

    if (isSplitRight)
        splitRight = intermediate.addSymbol(*getSplitNonIoVar(right->getAsSymbolNode()->getId()), loc);

    // This makes the whole assignment, recursing through subtypes as needed.
    traverse(left, right, splitLeft, splitRight, true);

    assert(assignList != nullptr);
    assignList->setOperator(EOpSequence);

    return assignList;
}

// An assignment to matrix swizzle must be decomposed into individual assignments.
// These must be selected component-wise from the RHS and stored component-wise
// into the LHS.
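// For example (illustrative only), under EOpAssign,
//     mat._m00_m11 = float2(1.0, 2.0);
// becomes, in effect, the sequence
//     intermVec = float2(1.0, 2.0);
//     mat[0][0] = intermVec[0];
//     mat[1][1] = intermVec[1];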
TIntermTyped* HlslParseContext::handleAssignToMatrixSwizzle(const TSourceLoc& loc, TOperator op, TIntermTyped* left,
                                                            TIntermTyped* right)
{
    assert(left->getAsOperator() && left->getAsOperator()->getOp() == EOpMatrixSwizzle);

    if (op != EOpAssign)
        error(loc, "only simple assignment to non-simple matrix swizzle is supported", "assign", "");

    // isolate the matrix and swizzle nodes
    TIntermTyped* matrix = left->getAsBinaryNode()->getLeft()->getAsTyped();
    const TIntermSequence& swizzle = left->getAsBinaryNode()->getRight()->getAsAggregate()->getSequence();

    // if the RHS isn't already a simple vector, let's store into one
    TIntermSymbol* vector = right->getAsSymbolNode();
    TIntermTyped* vectorAssign = nullptr;
    if (vector == nullptr) {
        // create a new intermediate vector variable to assign to
        TType vectorType(matrix->getBasicType(), EvqTemporary, matrix->getQualifier().precision, (int)swizzle.size()/2);
        vector = intermediate.addSymbol(*makeInternalVariable("intermVec", vectorType), loc);

        // assign the right to the new vector
        vectorAssign = handleAssign(loc, op, vector, right);
    }

    // Assign the vector components to the matrix components.
    // Store this as a sequence, so a single aggregate node represents this
    // entire operation.
    TIntermAggregate* result = intermediate.makeAggregate(vectorAssign);
    TType columnType(matrix->getType(), 0);
    TType componentType(columnType, 0);
    TType indexType(EbtInt);
    for (int i = 0; i < (int)swizzle.size(); i += 2) {
        // the right component, single index into the RHS vector
        TIntermTyped* rightComp = intermediate.addIndex(EOpIndexDirect, vector,
                                                        intermediate.addConstantUnion(i/2, loc), loc);

        // the left component, double index into the LHS matrix
        TIntermTyped* leftComp = intermediate.addIndex(EOpIndexDirect, matrix,
                                                       intermediate.addConstantUnion(swizzle[i]->getAsConstantUnion()->getConstArray(),
                                                                                     indexType, loc),
                                                       loc);
        leftComp->setType(columnType);
        leftComp = intermediate.addIndex(EOpIndexDirect, leftComp,
                                         intermediate.addConstantUnion(swizzle[i+1]->getAsConstantUnion()->getConstArray(),
                                                                       indexType, loc),
                                         loc);
        leftComp->setType(componentType);

        // Add the assignment to the aggregate
        result = intermediate.growAggregate(result, intermediate.addAssign(op, leftComp, rightComp, loc));
    }

    result->setOp(EOpSequence);

    return result;
}

//
// HLSL atomic operations have slightly different arguments than
// GLSL/AST/SPIR-V. The semantics are converted below in decomposeIntrinsic.
// This provides the post-decomposition equivalent opcode.
//
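// For example, an HLSL call such as
//     InterlockedAdd(mem, value, originalValue);
// is decomposed elsewhere into an EOpAtomicAdd (or EOpImageAtomicAdd for
// image operands) that yields the original value at 'mem'.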
TOperator HlslParseContext::mapAtomicOp(const TSourceLoc& loc, TOperator op, bool isImage)
{
    switch (op) {
    case EOpInterlockedAdd:             return isImage ? EOpImageAtomicAdd      : EOpAtomicAdd;
    case EOpInterlockedAnd:             return isImage ? EOpImageAtomicAnd      : EOpAtomicAnd;
    case EOpInterlockedCompareExchange: return isImage ? EOpImageAtomicCompSwap : EOpAtomicCompSwap;
    case EOpInterlockedMax:             return isImage ? EOpImageAtomicMax      : EOpAtomicMax;
    case EOpInterlockedMin:             return isImage ? EOpImageAtomicMin      : EOpAtomicMin;
    case EOpInterlockedOr:              return isImage ? EOpImageAtomicOr       : EOpAtomicOr;
    case EOpInterlockedXor:             return isImage ? EOpImageAtomicXor      : EOpAtomicXor;
    case EOpInterlockedExchange:        return isImage ? EOpImageAtomicExchange : EOpAtomicExchange;
    case EOpInterlockedCompareStore:    // TODO: ...
    default:
        error(loc, "unknown atomic operation", "unknown op", "");
        return EOpNull;
    }
}

//
// Create a combined sampler/texture from separate sampler and texture.
//
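// For example, the HLSL expression
//     tex.Sample(samp, uv)
// needs 'tex' and 'samp' merged into one combined object (as with a GLSL
// sampler2D); this builds the EOpConstructTextureSampler node for that.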
TIntermAggregate* HlslParseContext::handleSamplerTextureCombine(const TSourceLoc& loc, TIntermTyped* argTex,
                                                                TIntermTyped* argSampler)
{
    TIntermAggregate* txcombine = new TIntermAggregate(EOpConstructTextureSampler);

    txcombine->getSequence().push_back(argTex);
    txcombine->getSequence().push_back(argSampler);

    TSampler samplerType = argTex->getType().getSampler();
    samplerType.combined = true;

    // TODO:
    // This block exists until the spec no longer requires shadow modes on texture objects.
    // It can be deleted after that, along with the shadowTextureVariant member.
    {
        const bool shadowMode = argSampler->getType().getSampler().shadow;

        TIntermSymbol* texSymbol = argTex->getAsSymbolNode();

        if (texSymbol == nullptr)
            texSymbol = argTex->getAsBinaryNode()->getLeft()->getAsSymbolNode();

        if (texSymbol == nullptr) {
            error(loc, "unable to find texture symbol", "", "");
            return nullptr;
        }

        // This forces the texture's shadow state to be the sampler's
        // shadow state. This depends on downstream optimization to
        // DCE one variant in [shadow, nonshadow] if both are present,
        // or the SPIR-V module would be invalid.
        long long newId = texSymbol->getId();

        // Check to see if this texture has been given a shadow mode already.
        // If so, look up the one we already have.
        const auto textureShadowEntry = textureShadowVariant.find(texSymbol->getId());

        if (textureShadowEntry != textureShadowVariant.end())
            newId = textureShadowEntry->second->get(shadowMode);
        else
            textureShadowVariant[texSymbol->getId()] = NewPoolObject(tShadowTextureSymbols(), 1);

        // Sometimes we have to create another symbol (if this texture has been seen before,
        // and we haven't created the form for this shadow mode).
        if (newId == -1) {
            TType texType;
            texType.shallowCopy(argTex->getType());
            texType.getSampler().shadow = shadowMode;  // set appropriate shadow mode.
            globalQualifierFix(loc, texType.getQualifier());

            TVariable* newTexture = makeInternalVariable(texSymbol->getName(), texType);

            trackLinkage(*newTexture);

            newId = newTexture->getUniqueId();
        }

        assert(newId != -1);

        if (textureShadowVariant.find(newId) == textureShadowVariant.end())
            textureShadowVariant[newId] = textureShadowVariant[texSymbol->getId()];

        textureShadowVariant[newId]->set(shadowMode, newId);

        // Remember this shadow mode in the texture and the merged type.
        argTex->getWritableType().getSampler().shadow = shadowMode;
        samplerType.shadow = shadowMode;

        texSymbol->switchId(newId);
    }

    txcombine->setType(TType(samplerType, EvqTemporary));
    txcombine->setLoc(loc);

    return txcombine;
}

// Return true if this is a buffer type that has an associated counter buffer.
bool HlslParseContext::hasStructBuffCounter(const TType& type) const
{
    switch (type.getQualifier().declaredBuiltIn) {
    case EbvAppendConsume:       // fall through...
    case EbvRWStructuredBuffer:  // ...
        return true;
    default:
        return false;  // the other structuredbuffer types do not have a counter.
    }
}

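// Build the type of the hidden counter block backing IncrementCounter /
// DecrementCounter / Append / Consume: a one-member buffer block wrapping a
// uint counter, conceptually (a sketch of the shape, not literal HLSL):
//     buffer <counter-buffer name> { uint <implicit counter member>; };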
void HlslParseContext::counterBufferType(const TSourceLoc& loc, TType& type)
{
    // Counter type
    TType* counterType = new TType(EbtUint, EvqBuffer);
    counterType->setFieldName(intermediate.implicitCounterName);

    TTypeList* blockStruct = new TTypeList;
    TTypeLoc member = { counterType, loc };
    blockStruct->push_back(member);

    TType blockType(blockStruct, "", counterType->getQualifier());
    blockType.getQualifier().storage = EvqBuffer;

    type.shallowCopy(blockType);
    shareStructBufferType(type);
}

// declare counter for a structured buffer type
void HlslParseContext::declareStructBufferCounter(const TSourceLoc& loc, const TType& bufferType, const TString& name)
{
    // Bail out if not a struct buffer
    if (! isStructBufferType(bufferType))
        return;

    if (! hasStructBuffCounter(bufferType))
        return;

    TType blockType;
    counterBufferType(loc, blockType);

    TString* blockName = NewPoolTString(intermediate.addCounterBufferName(name).c_str());

    // Counter buffer is not yet in use
    structBufferCounter[*blockName] = false;

    shareStructBufferType(blockType);
    declareBlock(loc, blockType, blockName);
}

// return the counter that goes with a given structuredbuffer
TIntermTyped* HlslParseContext::getStructBufferCounter(const TSourceLoc& loc, TIntermTyped* buffer)
{
    // Bail out if not a struct buffer
    if (buffer == nullptr || ! isStructBufferType(buffer->getType()))
        return nullptr;

    const TString counterBlockName(intermediate.addCounterBufferName(buffer->getAsSymbolNode()->getName()));

    // Mark the counter as being used
    structBufferCounter[counterBlockName] = true;

    TIntermTyped* counterVar = handleVariable(loc, &counterBlockName);  // find the block structure
    TIntermTyped* index = intermediate.addConstantUnion(0, loc);        // index to counter inside block struct

    TIntermTyped* counterMember = intermediate.addIndex(EOpIndexDirectStruct, counterVar, index, loc);
    counterMember->setType(TType(EbtUint));
    return counterMember;
}

//
// Decompose structure buffer methods into AST
//
void HlslParseContext::decomposeStructBufferMethods(const TSourceLoc& loc, TIntermTyped*& node, TIntermNode* arguments)
{
    if (node == nullptr || node->getAsOperator() == nullptr || arguments == nullptr)
        return;

    const TOperator op = node->getAsOperator()->getOp();
    TIntermAggregate* argAggregate = arguments->getAsAggregate();

    // Buffer is the object upon which method is called, so always arg 0
    TIntermTyped* bufferObj = nullptr;

    // The parameters can be an aggregate, or just the object as a symbol if there are no function parameters.
    if (argAggregate) {
        if (argAggregate->getSequence().empty())
            return;
        if (argAggregate->getSequence()[0])
            bufferObj = argAggregate->getSequence()[0]->getAsTyped();
    } else {
        bufferObj = arguments->getAsSymbolNode();
    }

    if (bufferObj == nullptr || bufferObj->getAsSymbolNode() == nullptr)
        return;

    // Some methods require a hidden internal counter, obtained via getStructBufferCounter().
    // This lambda adds something to it and returns the old value.
    const auto incDecCounter = [&](int incval) -> TIntermTyped* {
        TIntermTyped* incrementValue = intermediate.addConstantUnion(static_cast<unsigned int>(incval), loc, true);
        TIntermTyped* counter = getStructBufferCounter(loc, bufferObj);  // obtain the counter member

        if (counter == nullptr)
            return nullptr;

        TIntermAggregate* counterIncrement = new TIntermAggregate(EOpAtomicAdd);
        counterIncrement->setType(TType(EbtUint, EvqTemporary));
        counterIncrement->setLoc(loc);
        counterIncrement->getSequence().push_back(counter);
        counterIncrement->getSequence().push_back(incrementValue);

        return counterIncrement;
    };

    // Index to obtain the runtime sized array out of the buffer.
    TIntermTyped* argArray = indexStructBufferContent(loc, bufferObj);
    if (argArray == nullptr)
        return;  // It might not be a struct buffer method.

    switch (op) {
    case EOpMethodLoad:
        {
            TIntermTyped* argIndex = makeIntegerIndex(argAggregate->getSequence()[1]->getAsTyped());  // index

            const TType& bufferType = bufferObj->getType();

            const TBuiltInVariable builtInType = bufferType.getQualifier().declaredBuiltIn;

            // Byte address buffers index in bytes (only multiples of 4 permitted... not so much a byte
            // address buffer then, but that's what it calls itself).
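            // For example, buf.Load(16) reads array element 16 >> 2, i.e. element 4.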
            const bool isByteAddressBuffer = (builtInType == EbvByteAddressBuffer ||
                                              builtInType == EbvRWByteAddressBuffer);

            if (isByteAddressBuffer)
                argIndex = intermediate.addBinaryNode(EOpRightShift, argIndex,
                                                      intermediate.addConstantUnion(2, loc, true),
                                                      loc, TType(EbtInt));

            // Index into the array to find the item being loaded.
            const TOperator idxOp = (argIndex->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;

            node = intermediate.addIndex(idxOp, argArray, argIndex, loc);

            const TType derefType(argArray->getType(), 0);
            node->setType(derefType);
        }

        break;

    case EOpMethodLoad2:
    case EOpMethodLoad3:
    case EOpMethodLoad4:
        {
            TIntermTyped* argIndex = makeIntegerIndex(argAggregate->getSequence()[1]->getAsTyped());  // index

            TOperator constructOp = EOpNull;
            int size = 0;

            switch (op) {
            case EOpMethodLoad2: size = 2; constructOp = EOpConstructVec2; break;
            case EOpMethodLoad3: size = 3; constructOp = EOpConstructVec3; break;
            case EOpMethodLoad4: size = 4; constructOp = EOpConstructVec4; break;
            default: assert(0);
            }

            TIntermTyped* body = nullptr;

            // First, we'll store the address in a variable to avoid multiple shifts
            // (we must convert the byte address to an item address)
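            // For example (illustrative), buf.Load2(addr) becomes, in effect:
            //     byteAddrTemp = addr >> 2;
            //     vec2(array[byteAddrTemp], array[byteAddrTemp + 1]);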
            TIntermTyped* byteAddrIdx = intermediate.addBinaryNode(EOpRightShift, argIndex,
                                                                   intermediate.addConstantUnion(2, loc, true),
                                                                   loc, TType(EbtInt));

            TVariable* byteAddrSym = makeInternalVariable("byteAddrTemp", TType(EbtInt, EvqTemporary));
            TIntermTyped* byteAddrIdxVar = intermediate.addSymbol(*byteAddrSym, loc);

            body = intermediate.growAggregate(body, intermediate.addAssign(EOpAssign, byteAddrIdxVar, byteAddrIdx, loc));

            TIntermTyped* vec = nullptr;

            // These are only valid on (rw)byteaddressbuffers, so we can always perform the >>2
            // address conversion.
            for (int idx=0; idx<size; ++idx) {
                TIntermTyped* offsetIdx = byteAddrIdxVar;

                // add index offset
                if (idx != 0)
                    offsetIdx = intermediate.addBinaryNode(EOpAdd, offsetIdx,
                                                           intermediate.addConstantUnion(idx, loc, true),
                                                           loc, TType(EbtInt));

                const TOperator idxOp = (offsetIdx->getQualifier().storage == EvqConst) ? EOpIndexDirect
                                                                                        : EOpIndexIndirect;

                TIntermTyped* indexVal = intermediate.addIndex(idxOp, argArray, offsetIdx, loc);

                TType derefType(argArray->getType(), 0);
                derefType.getQualifier().makeTemporary();
                indexVal->setType(derefType);

                vec = intermediate.growAggregate(vec, indexVal);
            }

            vec->setType(TType(argArray->getBasicType(), EvqTemporary, size));
            vec->getAsAggregate()->setOperator(constructOp);

            body = intermediate.growAggregate(body, vec);
            body->setType(vec->getType());
            body->getAsAggregate()->setOperator(EOpSequence);

            node = body;
        }

        break;

    case EOpMethodStore:
    case EOpMethodStore2:
    case EOpMethodStore3:
    case EOpMethodStore4:
        {
            TIntermTyped* argIndex = makeIntegerIndex(argAggregate->getSequence()[1]->getAsTyped());  // index
            TIntermTyped* argValue = argAggregate->getSequence()[2]->getAsTyped();  // value

            // Index into the array to find the item being loaded.
            // Byte address buffers index in bytes (only multiples of 4 permitted... not so much a byte address
            // buffer then, but that's what it calls itself).

            int size = 0;

            switch (op) {
            case EOpMethodStore:  size = 1; break;
            case EOpMethodStore2: size = 2; break;
            case EOpMethodStore3: size = 3; break;
            case EOpMethodStore4: size = 4; break;
            default: assert(0);
            }

            TIntermAggregate* body = nullptr;

            // First, we'll store the address in a variable to avoid multiple shifts
            // (we must convert the byte address to an item address)
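            // For example (illustrative), buf.Store2(addr, val) becomes, in effect:
            //     byteAddrTemp = addr >> 2;
            //     array[byteAddrTemp]     = val.x;
            //     array[byteAddrTemp + 1] = val.y;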
            TIntermTyped* byteAddrIdx = intermediate.addBinaryNode(EOpRightShift, argIndex,
                                                                   intermediate.addConstantUnion(2, loc, true), loc, TType(EbtInt));

            TVariable* byteAddrSym = makeInternalVariable("byteAddrTemp", TType(EbtInt, EvqTemporary));
            TIntermTyped* byteAddrIdxVar = intermediate.addSymbol(*byteAddrSym, loc);

            body = intermediate.growAggregate(body, intermediate.addAssign(EOpAssign, byteAddrIdxVar, byteAddrIdx, loc));

            for (int idx=0; idx<size; ++idx) {
                TIntermTyped* offsetIdx = byteAddrIdxVar;
                TIntermTyped* idxConst = intermediate.addConstantUnion(idx, loc, true);

                // add index offset
                if (idx != 0)
                    offsetIdx = intermediate.addBinaryNode(EOpAdd, offsetIdx, idxConst, loc, TType(EbtInt));

                const TOperator idxOp = (offsetIdx->getQualifier().storage == EvqConst) ? EOpIndexDirect
                                                                                        : EOpIndexIndirect;

                TIntermTyped* lValue = intermediate.addIndex(idxOp, argArray, offsetIdx, loc);
                const TType derefType(argArray->getType(), 0);
                lValue->setType(derefType);

                TIntermTyped* rValue;
                if (size == 1) {
                    rValue = argValue;
                } else {
                    rValue = intermediate.addIndex(EOpIndexDirect, argValue, idxConst, loc);
                    const TType indexType(argValue->getType(), 0);
                    rValue->setType(indexType);
                }

                TIntermTyped* assign = intermediate.addAssign(EOpAssign, lValue, rValue, loc);

                body = intermediate.growAggregate(body, assign);
            }

            body->setOperator(EOpSequence);
            node = body;
        }

        break;

    case EOpMethodGetDimensions:
        {
            const int numArgs = (int)argAggregate->getSequence().size();
            TIntermTyped* argNumItems = argAggregate->getSequence()[1]->getAsTyped();  // out num items
            TIntermTyped* argStride   = numArgs > 2 ? argAggregate->getSequence()[2]->getAsTyped() : nullptr;  // out stride

            TIntermAggregate* body = nullptr;

            // Length output:
            if (argArray->getType().isSizedArray()) {
                const int length = argArray->getType().getOuterArraySize();
                TIntermTyped* assign = intermediate.addAssign(EOpAssign, argNumItems,
                                                              intermediate.addConstantUnion(length, loc, true), loc);
                body = intermediate.growAggregate(body, assign, loc);
            } else {
                TIntermTyped* lengthCall = intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, argArray,
                                                                               argNumItems->getType());
                TIntermTyped* assign = intermediate.addAssign(EOpAssign, argNumItems, lengthCall, loc);
                body = intermediate.growAggregate(body, assign, loc);
            }

            // Stride output:
            if (argStride != nullptr) {
                int size;
                int stride;
                intermediate.getMemberAlignment(argArray->getType(), size, stride, argArray->getType().getQualifier().layoutPacking,
                                                argArray->getType().getQualifier().layoutMatrix == ElmRowMajor);

                TIntermTyped* assign = intermediate.addAssign(EOpAssign, argStride,
                                                              intermediate.addConstantUnion(stride, loc, true), loc);

                body = intermediate.growAggregate(body, assign);
            }

            body->setOperator(EOpSequence);
            node = body;
        }

        break;

    case EOpInterlockedAdd:
    case EOpInterlockedAnd:
    case EOpInterlockedExchange:
    case EOpInterlockedMax:
    case EOpInterlockedMin:
    case EOpInterlockedOr:
    case EOpInterlockedXor:
    case EOpInterlockedCompareExchange:
    case EOpInterlockedCompareStore:
        {
            // We'll replace the first argument with the block dereference, and let
            // downstream decomposition handle the rest.

            TIntermSequence& sequence = argAggregate->getSequence();

            TIntermTyped* argIndex = makeIntegerIndex(sequence[1]->getAsTyped());  // index
            argIndex = intermediate.addBinaryNode(EOpRightShift, argIndex, intermediate.addConstantUnion(2, loc, true),
                                                  loc, TType(EbtInt));

            const TOperator idxOp = (argIndex->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;
            TIntermTyped* element = intermediate.addIndex(idxOp, argArray, argIndex, loc);

            const TType derefType(argArray->getType(), 0);
            element->setType(derefType);

            // Replace the numeric byte offset parameter with array reference.
            sequence[1] = element;
            sequence.erase(sequence.begin(), sequence.begin()+1);
        }
        break;

    case EOpMethodIncrementCounter:
        {
            node = incDecCounter(1);
            break;
        }

    case EOpMethodDecrementCounter:
        {
            TIntermTyped* preIncValue = incDecCounter(-1);  // result is original value
            node = intermediate.addBinaryNode(EOpAdd, preIncValue, intermediate.addConstantUnion(-1, loc, true), loc,
                                              preIncValue->getType());
            break;
        }

    case EOpMethodAppend:
        {
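            // buf.Append(val) is, in effect, array[counter++] = val: bump the
            // hidden counter and store through the pre-increment value.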
            TIntermTyped* oldCounter = incDecCounter(1);

            TIntermTyped* lValue = intermediate.addIndex(EOpIndexIndirect, argArray, oldCounter, loc);
            TIntermTyped* rValue = argAggregate->getSequence()[1]->getAsTyped();

            const TType derefType(argArray->getType(), 0);
            lValue->setType(derefType);

            node = intermediate.addAssign(EOpAssign, lValue, rValue, loc);

            break;
        }

    case EOpMethodConsume:
        {
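            // buf.Consume() is, in effect, array[--counter]: decrement the hidden
            // counter and read the element at the new value.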
            TIntermTyped* oldCounter = incDecCounter(-1);

            TIntermTyped* newCounter = intermediate.addBinaryNode(EOpAdd, oldCounter,
                                                                  intermediate.addConstantUnion(-1, loc, true), loc,
                                                                  oldCounter->getType());

            node = intermediate.addIndex(EOpIndexIndirect, argArray, newCounter, loc);

            const TType derefType(argArray->getType(), 0);
            node->setType(derefType);

            break;
        }

    default:
        break;  // most pass through unchanged
    }
}

// Create array of standard sample positions for given sample count.
// TODO: remove when a real method to query sample pos exists in SPIR-V.
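// Used when decomposing sample-position queries (e.g., GetSamplePosition)
// by indexing into the constant array this returns.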
TIntermConstantUnion* HlslParseContext::getSamplePosArray(int count)
{
    struct tSamplePos { float x, y; };

    static const tSamplePos pos1[] = {
        { 0.0/16.0,  0.0/16.0 },
    };

    // standard sample positions for 2, 4, 8, and 16 samples.
    static const tSamplePos pos2[] = {
        { 4.0/16.0,  4.0/16.0 }, {-4.0/16.0, -4.0/16.0 },
    };

    static const tSamplePos pos4[] = {
        {-2.0/16.0, -6.0/16.0 }, { 6.0/16.0, -2.0/16.0 }, {-6.0/16.0,  2.0/16.0 }, { 2.0/16.0,  6.0/16.0 },
    };

    static const tSamplePos pos8[] = {
        { 1.0/16.0, -3.0/16.0 }, {-1.0/16.0,  3.0/16.0 }, { 5.0/16.0,  1.0/16.0 }, {-3.0/16.0, -5.0/16.0 },
        {-5.0/16.0,  5.0/16.0 }, {-7.0/16.0, -1.0/16.0 }, { 3.0/16.0,  7.0/16.0 }, { 7.0/16.0, -7.0/16.0 },
    };

    static const tSamplePos pos16[] = {
        { 1.0/16.0,  1.0/16.0 }, {-1.0/16.0, -3.0/16.0 }, {-3.0/16.0,  2.0/16.0 }, { 4.0/16.0, -1.0/16.0 },
        {-5.0/16.0, -2.0/16.0 }, { 2.0/16.0,  5.0/16.0 }, { 5.0/16.0,  3.0/16.0 }, { 3.0/16.0, -5.0/16.0 },
        {-2.0/16.0,  6.0/16.0 }, { 0.0/16.0, -7.0/16.0 }, {-4.0/16.0, -6.0/16.0 }, {-6.0/16.0,  4.0/16.0 },
        {-8.0/16.0,  0.0/16.0 }, { 7.0/16.0, -4.0/16.0 }, { 6.0/16.0,  7.0/16.0 }, {-7.0/16.0, -8.0/16.0 },
    };

    const tSamplePos* sampleLoc = nullptr;
    int numSamples = count;

    switch (count) {
    case 2:  sampleLoc = pos2;  break;
    case 4:  sampleLoc = pos4;  break;
    case 8:  sampleLoc = pos8;  break;
    case 16: sampleLoc = pos16; break;
    default:
        sampleLoc = pos1;
        numSamples = 1;
    }

    TConstUnionArray* values = new TConstUnionArray(numSamples*2);

    // Iterate over numSamples, not count: for unsupported counts, numSamples
    // falls back to 1 and 'values' only has room for one position.
    for (int pos=0; pos<numSamples; ++pos) {
        TConstUnion x, y;
        x.setDConst(sampleLoc[pos].x);
        y.setDConst(sampleLoc[pos].y);

        (*values)[pos*2+0] = x;
        (*values)[pos*2+1] = y;
    }

    TType retType(EbtFloat, EvqConst, 2);

    if (numSamples != 1) {
        TArraySizes* arraySizes = new TArraySizes;
        arraySizes->addInnerSize(numSamples);
        retType.transferArraySizes(arraySizes);
    }

    return new TIntermConstantUnion(*values, retType);
}

//
// Decompose DX9 and DX10 sample intrinsics & object methods into AST
//
void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermTyped*& node, TIntermNode* arguments)
{
    if (node == nullptr || !node->getAsOperator())
        return;

    // Sampler return must always be a vec4, but we can construct a shorter vector or a structure from it.
    const auto convertReturn = [&loc, &node, this](TIntermTyped* result, const TSampler& sampler) -> TIntermTyped* {
        result->setType(TType(node->getType().getBasicType(), EvqTemporary, node->getVectorSize()));

        TIntermTyped* convertedResult = nullptr;

        TType retType;
        getTextureReturnType(sampler, retType);

        if (retType.isStruct()) {
            // For type convenience, conversionAggregate points to the convertedResult (we know it's an aggregate here)
            TIntermAggregate* conversionAggregate = new TIntermAggregate;
            convertedResult = conversionAggregate;

            // Convert vector output to return structure. We will need a temp symbol to copy the results to.
            TVariable* structVar = makeInternalVariable("@sampleStructTemp", retType);

            // We also need a temp symbol to hold the result of the texture. We don't want to re-fetch the
            // sample each time we index into the result, so we'll copy to this, and index into the copy.
            TVariable* sampleShadow = makeInternalVariable("@sampleResultShadow", result->getType());

            // Initial copy from texture to our sample result shadow.
            TIntermTyped* shadowCopy = intermediate.addAssign(EOpAssign, intermediate.addSymbol(*sampleShadow, loc),
                                                              result, loc);

            conversionAggregate->getSequence().push_back(shadowCopy);

            unsigned vec4Pos = 0;

            for (unsigned m = 0; m < unsigned(retType.getStruct()->size()); ++m) {
                const TType memberType(retType, m);  // dereferenced type of the member we're about to assign.

                // Check for bad struct members. This should have been caught upstream. Complain, because
                // we don't know what to do with it. This algorithm could be generalized to handle
                // other things, e.g., sub-structures, but HLSL doesn't allow them.
                if (!memberType.isVector() && !memberType.isScalar()) {
                    error(loc, "expected: scalar or vector type in texture structure", "", "");
                    return nullptr;
                }

                // Index into the struct variable to find the member to assign.
                TIntermTyped* structMember = intermediate.addIndex(EOpIndexDirectStruct,
                                                                   intermediate.addSymbol(*structVar, loc),
                                                                   intermediate.addConstantUnion(m, loc), loc);

                structMember->setType(memberType);

                // Assign each component of (possible) vector in struct member.
                for (int component = 0; component < memberType.getVectorSize(); ++component) {
                    TIntermTyped* vec4Member = intermediate.addIndex(EOpIndexDirect,
                                                                     intermediate.addSymbol(*sampleShadow, loc),
                                                                     intermediate.addConstantUnion(vec4Pos++, loc), loc);
                    vec4Member->setType(TType(memberType.getBasicType(), EvqTemporary, 1));

                    TIntermTyped* memberAssign = nullptr;

                    if (memberType.isVector()) {
                        // Vector member: we need to create an access chain to the vector component.
                        TIntermTyped* structVecComponent = intermediate.addIndex(EOpIndexDirect, structMember,
                                                                                 intermediate.addConstantUnion(component, loc), loc);

                        memberAssign = intermediate.addAssign(EOpAssign, structVecComponent, vec4Member, loc);
                    } else {
                        // Scalar member: we can assign to it directly.
                        memberAssign = intermediate.addAssign(EOpAssign, structMember, vec4Member, loc);
                    }

                    conversionAggregate->getSequence().push_back(memberAssign);
                }
            }

            // Add completed variable so the expression results in the whole struct value we just built.
            conversionAggregate->getSequence().push_back(intermediate.addSymbol(*structVar, loc));

            // Make it a sequence.
            intermediate.setAggregateOperator(conversionAggregate, EOpSequence, retType, loc);
        } else {
            // Vector-clamp the output if the template vector type is smaller than the sample result.
            if (retType.getVectorSize() < node->getVectorSize()) {
                // Too many components. Construct shorter vector from it.
                const TOperator op = intermediate.mapTypeToConstructorOp(retType);

                convertedResult = constructBuiltIn(retType, op, result, loc, false);
            } else {
                // Enough components. Use directly.
                convertedResult = result;
            }
        }

        convertedResult->setLoc(loc);
        return convertedResult;
    };

    const TOperator op = node->getAsOperator()->getOp();
    const TIntermAggregate* argAggregate = arguments ? arguments->getAsAggregate() : nullptr;

    // Bail out if not a sampler method.
    // Note, though, that it is odd to do this before checking the op: the op could be
    // something that takes the arguments, while the function in question takes the
    // result of the op. So this is not the final word.
    if (arguments != nullptr) {
        if (argAggregate == nullptr) {
            if (arguments->getAsTyped()->getBasicType() != EbtSampler)
                return;
        } else {
            if (argAggregate->getSequence().size() == 0 ||
                argAggregate->getSequence()[0] == nullptr ||
                argAggregate->getSequence()[0]->getAsTyped()->getBasicType() != EbtSampler)
                return;
        }
    }

    switch (op) {
    // **** DX9 intrinsics: ****
    case EOpTexture:
        {
            // Texture with ddx & ddy is really gradient form in HLSL
            if (argAggregate->getSequence().size() == 4)
                node->getAsAggregate()->setOperator(EOpTextureGrad);

            break;
        }
    case EOpTextureLod: // is almost EOpTextureBias (only args & operations are different)
        {
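            // e.g., DX9-style tex2Dlod(s, c) samples at c.xy (c.xyz for 3D/cube),
            // with the LOD taken from c.w.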
            TIntermTyped* argSamp  = argAggregate->getSequence()[0]->getAsTyped();  // sampler
            TIntermTyped* argCoord = argAggregate->getSequence()[1]->getAsTyped();  // coord

            assert(argCoord->getVectorSize() == 4);
            TIntermTyped* w = intermediate.addConstantUnion(3, loc, true);
            TIntermTyped* argLod = intermediate.addIndex(EOpIndexDirect, argCoord, w, loc);

            TOperator constructOp = EOpNull;
            const TSampler& sampler = argSamp->getType().getSampler();
            int coordSize = 0;

            switch (sampler.dim)
            {
            case Esd1D:   constructOp = EOpConstructFloat; coordSize = 1; break;  // 1D
            case Esd2D:   constructOp = EOpConstructVec2;  coordSize = 2; break;  // 2D
            case Esd3D:   constructOp = EOpConstructVec3;  coordSize = 3; break;  // 3D
            case EsdCube: constructOp = EOpConstructVec3;  coordSize = 3; break;  // also 3D
            default:
                error(loc, "unhandled DX9 texture LoD dimension", "", "");
                break;
            }

            TIntermAggregate* constructCoord = new TIntermAggregate(constructOp);
            constructCoord->getSequence().push_back(argCoord);
            constructCoord->setLoc(loc);
            constructCoord->setType(TType(argCoord->getBasicType(), EvqTemporary, coordSize));

            TIntermAggregate* tex = new TIntermAggregate(EOpTextureLod);
            tex->getSequence().push_back(argSamp);        // sampler
            tex->getSequence().push_back(constructCoord); // coordinate
            tex->getSequence().push_back(argLod);         // lod

            node = convertReturn(tex, sampler);

            break;
        }

    case EOpTextureBias:
        {
            TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped();  // sampler
            TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped();  // coord

            // HLSL puts the bias in the W component of the coordinate. We extract it
            // and add it to the argument list instead.
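            // e.g., DX9-style tex2Dbias(s, c) samples at c.xy with bias c.w.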
            TIntermTyped* w = intermediate.addConstantUnion(3, loc, true);
            TIntermTyped* bias = intermediate.addIndex(EOpIndexDirect, arg1, w, loc);

            TOperator constructOp = EOpNull;
            const TSampler& sampler = arg0->getType().getSampler();

            switch (sampler.dim) {
            case Esd1D:   constructOp = EOpConstructFloat; break;  // 1D
            case Esd2D:   constructOp = EOpConstructVec2;  break;  // 2D
            case Esd3D:   constructOp = EOpConstructVec3;  break;  // 3D
            case EsdCube: constructOp = EOpConstructVec3;  break;  // also 3D
            default:
                error(loc, "unhandled DX9 texture bias dimension", "", "");
                break;
            }

            TIntermAggregate* constructCoord = new TIntermAggregate(constructOp);
            constructCoord->getSequence().push_back(arg1);
            constructCoord->setLoc(loc);

            // The input vector should never be less than 2, since there's always a bias.
            // The max is for safety, and should be a no-op.
            constructCoord->setType(TType(arg1->getBasicType(), EvqTemporary, std::max(arg1->getVectorSize() - 1, 0)));

            TIntermAggregate* tex = new TIntermAggregate(EOpTexture);
            tex->getSequence().push_back(arg0);           // sampler
            tex->getSequence().push_back(constructCoord); // coordinate
            tex->getSequence().push_back(bias);           // bias

            node = convertReturn(tex, sampler);

            break;
        }

    // **** DX10 methods: ****
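    // e.g., tex.Sample(samp, uv) becomes, in effect, texture(combined, uv) on the
    // combined texture/sampler; the extra-argument overloads map to the
    // offset/bias/gradient forms below.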
    case EOpMethodSample:     // fall through
    case EOpMethodSampleBias: // ...
        {
            TIntermTyped* argTex    = argAggregate->getSequence()[0]->getAsTyped();
            TIntermTyped* argSamp   = argAggregate->getSequence()[1]->getAsTyped();
            TIntermTyped* argCoord  = argAggregate->getSequence()[2]->getAsTyped();
            TIntermTyped* argBias   = nullptr;
            TIntermTyped* argOffset = nullptr;
            const TSampler& sampler = argTex->getType().getSampler();

            int nextArg = 3;

            if (op == EOpMethodSampleBias)  // SampleBias has a bias arg
                argBias = argAggregate->getSequence()[nextArg++]->getAsTyped();

            TOperator textureOp = EOpTexture;

            if ((int)argAggregate->getSequence().size() == (nextArg+1)) {  // last parameter is offset form
                textureOp = EOpTextureOffset;
                argOffset = argAggregate->getSequence()[nextArg++]->getAsTyped();
            }

            TIntermAggregate* txcombine = handleSamplerTextureCombine(loc, argTex, argSamp);

            TIntermAggregate* txsample = new TIntermAggregate(textureOp);
            txsample->getSequence().push_back(txcombine);
            txsample->getSequence().push_back(argCoord);

            if (argOffset != nullptr)
                txsample->getSequence().push_back(argOffset);

            if (argBias != nullptr)
                txsample->getSequence().push_back(argBias);

            node = convertReturn(txsample, sampler);

            break;
        }

    case EOpMethodSampleGrad: // ...
        {
            TIntermTyped* argTex    = argAggregate->getSequence()[0]->getAsTyped();
            TIntermTyped* argSamp   = argAggregate->getSequence()[1]->getAsTyped();
            TIntermTyped* argCoord  = argAggregate->getSequence()[2]->getAsTyped();
            TIntermTyped* argDDX    = argAggregate->getSequence()[3]->getAsTyped();
            TIntermTyped* argDDY    = argAggregate->getSequence()[4]->getAsTyped();
            TIntermTyped* argOffset = nullptr;
            const TSampler& sampler = argTex->getType().getSampler();

            TOperator textureOp = EOpTextureGrad;

            if (argAggregate->getSequence().size() == 6) {  // last parameter is offset form
                textureOp = EOpTextureGradOffset;
                argOffset = argAggregate->getSequence()[5]->getAsTyped();
            }

            TIntermAggregate* txcombine = handleSamplerTextureCombine(loc, argTex, argSamp);

            TIntermAggregate* txsample = new TIntermAggregate(textureOp);
            txsample->getSequence().push_back(txcombine);
            txsample->getSequence().push_back(argCoord);
            txsample->getSequence().push_back(argDDX);
            txsample->getSequence().push_back(argDDY);

            if (argOffset != nullptr)
                txsample->getSequence().push_back(argOffset);

            node = convertReturn(txsample, sampler);

            break;
        }

    case EOpMethodGetDimensions:
        {
            // AST returns a vector of results, which we break apart component-wise into
4160 // separate values to assign to the HLSL method's outputs, a la:
4161 // tx . GetDimensions(width, height);
4162 // float2 sizeQueryTemp = EOpTextureQuerySize
4163 // width = sizeQueryTemp.X;
4164 // height = sizeQueryTemp.Y;
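// The mip-level overload is handled the same way, a la:
//   tx . GetDimensions(mipLevel, width, height, numberOfLevels);
// where mipLevel feeds the size query's LOD argument and numberOfLevels
// is filled from a separate EOpTextureQueryLevels query (see below).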
4165
4166 TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
4167 const TType& texType = argTex->getType();
4168
4169 assert(texType.getBasicType() == EbtSampler);
4170
4171 const TSampler& sampler = texType.getSampler();
4172 const TSamplerDim dim = sampler.dim;
4173 const bool isImage = sampler.isImage();
4174 const bool isMs = sampler.isMultiSample();
4175 const int numArgs = (int)argAggregate->getSequence().size();
4176
4177 int numDims = 0;
4178
4179 switch (dim) {
4180 case Esd1D: numDims = 1; break; // W
4181 case Esd2D: numDims = 2; break; // W, H
4182 case Esd3D: numDims = 3; break; // W, H, D
4183 case EsdCube: numDims = 2; break; // W, H (cube)
4184 case EsdBuffer: numDims = 1; break; // W (buffers)
4185 case EsdRect: numDims = 2; break; // W, H (rect)
4186 default:
4187 error(loc, "unhandled DX10 MethodGet dimension", "", "");
4188 break;
4189 }
4190
4191 // Arrayed adds another dimension for the number of array elements
4192 if (sampler.isArrayed())
4193 ++numDims;
4194
4195 // Establish whether the method itself is querying mip levels. This can be false even
4196 // if the underlying query requires a MIP level, due to the available HLSL method overloads.
4197 const bool mipQuery = (numArgs > (numDims + 1 + (isMs ? 1 : 0)));
4198
4199 // Establish whether we must use the LOD form of query (even if the method did not supply a mip level to query).
4200 // True if:
4201 // 1. 1D/2D/3D/Cube AND multisample==0 AND NOT image (those lack a non-LOD query form, so must use the LOD form)
4202 // or,
4203 // 2. There is a LOD (because the non-LOD query cannot be used in that case, per spec)
4204 const bool mipRequired =
4205 ((dim == Esd1D || dim == Esd2D || dim == Esd3D || dim == EsdCube) && !isMs && !isImage) || // 1...
4206 mipQuery; // 2...
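// For example (sketch): Texture2D::GetDimensions(width, height) arrives with
// numArgs == 3 and numDims == 2, so mipQuery is false, but mipRequired is
// true (non-MS, non-image 2D), so an explicit LOD of 0 is supplied below.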
4207
4208 // AST assumes integer return. Will be converted to float if required.
4209 TIntermAggregate* sizeQuery = new TIntermAggregate(isImage ? EOpImageQuerySize : EOpTextureQuerySize);
4210 sizeQuery->getSequence().push_back(argTex);
4211
4212 // If we're building an LOD query, add the LOD.
4213 if (mipRequired) {
4214 // If the base HLSL query had no MIP level given, use level 0.
4215 TIntermTyped* queryLod = mipQuery ? argAggregate->getSequence()[1]->getAsTyped() :
4216 intermediate.addConstantUnion(0, loc, true);
4217 sizeQuery->getSequence().push_back(queryLod);
4218 }
4219
4220 sizeQuery->setType(TType(EbtUint, EvqTemporary, numDims));
4221 sizeQuery->setLoc(loc);
4222
4223 // Return value from size query
4224 TVariable* tempArg = makeInternalVariable("sizeQueryTemp", sizeQuery->getType());
4225 tempArg->getWritableType().getQualifier().makeTemporary();
4226 TIntermTyped* sizeQueryAssign = intermediate.addAssign(EOpAssign,
4227 intermediate.addSymbol(*tempArg, loc),
4228 sizeQuery, loc);
4229
4230 // Compound statement for assigning outputs
4231 TIntermAggregate* compoundStatement = intermediate.makeAggregate(sizeQueryAssign, loc);
4232 // Index of first output parameter
4233 const int outParamBase = mipQuery ? 2 : 1;
4234
4235 for (int compNum = 0; compNum < numDims; ++compNum) {
4236 TIntermTyped* indexedOut = nullptr;
4237 TIntermSymbol* sizeQueryReturn = intermediate.addSymbol(*tempArg, loc);
4238
4239 if (numDims > 1) {
4240 TIntermTyped* component = intermediate.addConstantUnion(compNum, loc, true);
4241 indexedOut = intermediate.addIndex(EOpIndexDirect, sizeQueryReturn, component, loc);
4242 indexedOut->setType(TType(EbtUint, EvqTemporary, 1));
4243 indexedOut->setLoc(loc);
4244 } else {
4245 indexedOut = sizeQueryReturn;
4246 }
4247
4248 TIntermTyped* outParam = argAggregate->getSequence()[outParamBase + compNum]->getAsTyped();
4249 TIntermTyped* compAssign = intermediate.addAssign(EOpAssign, outParam, indexedOut, loc);
4250
4251 compoundStatement = intermediate.growAggregate(compoundStatement, compAssign);
4252 }
4253
4254 // handle mip level parameter
4255 if (mipQuery) {
4256 TIntermTyped* outParam = argAggregate->getSequence()[outParamBase + numDims]->getAsTyped();
4257
4258 TIntermAggregate* levelsQuery = new TIntermAggregate(EOpTextureQueryLevels);
4259 levelsQuery->getSequence().push_back(argTex);
4260 levelsQuery->setType(TType(EbtUint, EvqTemporary, 1));
4261 levelsQuery->setLoc(loc);
4262
4263 TIntermTyped* compAssign = intermediate.addAssign(EOpAssign, outParam, levelsQuery, loc);
4264 compoundStatement = intermediate.growAggregate(compoundStatement, compAssign);
4265 }
4266
4267 // 2DMS formats query # samples, which needs a different query op
4268 if (sampler.isMultiSample()) {
4269 TIntermTyped* outParam = argAggregate->getSequence()[outParamBase + numDims]->getAsTyped();
4270
4271 TIntermAggregate* samplesQuery = new TIntermAggregate(EOpImageQuerySamples);
4272 samplesQuery->getSequence().push_back(argTex);
4273 samplesQuery->setType(TType(EbtUint, EvqTemporary, 1));
4274 samplesQuery->setLoc(loc);
4275
4276 TIntermTyped* compAssign = intermediate.addAssign(EOpAssign, outParam, samplesQuery, loc);
4277 compoundStatement = intermediate.growAggregate(compoundStatement, compAssign);
4278 }
4279
4280 compoundStatement->setOperator(EOpSequence);
4281 compoundStatement->setLoc(loc);
4282 compoundStatement->setType(TType(EbtVoid));
4283
4284 node = compoundStatement;
4285
4286 break;
4287 }
4288
4289 case EOpMethodSampleCmp: // fall through...
4290 case EOpMethodSampleCmpLevelZero:
4291 {
4292 TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
4293 TIntermTyped* argSamp = argAggregate->getSequence()[1]->getAsTyped();
4294 TIntermTyped* argCoord = argAggregate->getSequence()[2]->getAsTyped();
4295 TIntermTyped* argCmpVal = argAggregate->getSequence()[3]->getAsTyped();
4296 TIntermTyped* argOffset = nullptr;
4297
4298 // Sampler argument should be a sampler.
4299 if (argSamp->getType().getBasicType() != EbtSampler) {
4300 error(loc, "expected: sampler type", "", "");
4301 return;
4302 }
4303
4304 // Sampler should be a SamplerComparisonState
4305 if (! argSamp->getType().getSampler().isShadow()) {
4306 error(loc, "expected: SamplerComparisonState", "", "");
4307 return;
4308 }
4309
4310 // optional offset value
4311 if (argAggregate->getSequence().size() > 4)
4312 argOffset = argAggregate->getSequence()[4]->getAsTyped();
4313
4314 const int coordDimWithCmpVal = argCoord->getType().getVectorSize() + 1; // +1 for cmp
4315
4316 // AST wants comparison value as one of the texture coordinates
4317 TOperator constructOp = EOpNull;
4318 switch (coordDimWithCmpVal) {
4319 // 1D can't happen: there's always at least 1 coordinate dimension + 1 cmp val
4320 case 2: constructOp = EOpConstructVec2; break;
4321 case 3: constructOp = EOpConstructVec3; break;
4322 case 4: constructOp = EOpConstructVec4; break;
4323 case 5: constructOp = EOpConstructVec4; break; // cubeArrayShadow, cmp value is separate arg.
4324 default:
4325 error(loc, "unhandled DX10 MethodSample dimension", "", "");
4326 break;
4327 }
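// For example (sketch): for a Texture2D, argCoord is a float2, so
// coordDimWithCmpVal == 3 and the result is roughly the equivalent of
// GLSL's texture(sampler2DShadow, vec3(u, v, cmpVal)).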
4328
4329 TIntermAggregate* coordWithCmp = new TIntermAggregate(constructOp);
4330 coordWithCmp->getSequence().push_back(argCoord);
4331 if (coordDimWithCmpVal != 5) // cube array shadow is special.
4332 coordWithCmp->getSequence().push_back(argCmpVal);
4333 coordWithCmp->setLoc(loc);
4334 coordWithCmp->setType(TType(argCoord->getBasicType(), EvqTemporary, std::min(coordDimWithCmpVal, 4)));
4335
4336 TOperator textureOp = (op == EOpMethodSampleCmpLevelZero ? EOpTextureLod : EOpTexture);
4337 if (argOffset != nullptr)
4338 textureOp = (op == EOpMethodSampleCmpLevelZero ? EOpTextureLodOffset : EOpTextureOffset);
4339
4340 // Create combined sampler & texture op
4341 TIntermAggregate* txcombine = handleSamplerTextureCombine(loc, argTex, argSamp);
4342 TIntermAggregate* txsample = new TIntermAggregate(textureOp);
4343 txsample->getSequence().push_back(txcombine);
4344 txsample->getSequence().push_back(coordWithCmp);
4345
4346 if (coordDimWithCmpVal == 5) // cube array shadow is special: cmp val follows coord.
4347 txsample->getSequence().push_back(argCmpVal);
4348
4349 // the LevelZero form uses 0 as an explicit LOD
4350 if (op == EOpMethodSampleCmpLevelZero)
4351 txsample->getSequence().push_back(intermediate.addConstantUnion(0.0, EbtFloat, loc, true));
4352
4353 // Add offset if present
4354 if (argOffset != nullptr)
4355 txsample->getSequence().push_back(argOffset);
4356
4357 txsample->setType(node->getType());
4358 txsample->setLoc(loc);
4359 node = txsample;
4360
4361 break;
4362 }
4363
4364 case EOpMethodLoad:
4365 {
4366 TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
4367 TIntermTyped* argCoord = argAggregate->getSequence()[1]->getAsTyped();
4368 TIntermTyped* argOffset = nullptr;
4369 TIntermTyped* lodComponent = nullptr;
4370 TIntermTyped* coordSwizzle = nullptr;
4371
4372 const TSampler& sampler = argTex->getType().getSampler();
4373 const bool isMS = sampler.isMultiSample();
4374 const bool isBuffer = sampler.dim == EsdBuffer;
4375 const bool isImage = sampler.isImage();
4376 const TBasicType coordBaseType = argCoord->getType().getBasicType();
4377
4378 // Last component of coordinate is the mip level for non-MS textures; we separate them here:
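// For example (sketch): Texture2D::Load(int3(x, y, mip)) fetches texel (x, y)
// at mip level mip; the .xy swizzle and the .z extraction are built below.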
4379 if (isMS || isBuffer || isImage) {
4380 // MS, Buffer, and Image have no LOD
4381 coordSwizzle = argCoord;
4382 } else {
4383 // Extract coordinate
4384 int swizzleSize = argCoord->getType().getVectorSize() - (isMS ? 0 : 1);
4385 TSwizzleSelectors<TVectorSelector> coordFields;
4386 for (int i = 0; i < swizzleSize; ++i)
4387 coordFields.push_back(i);
4388 TIntermTyped* coordIdx = intermediate.addSwizzle(coordFields, loc);
4389 coordSwizzle = intermediate.addIndex(EOpVectorSwizzle, argCoord, coordIdx, loc);
4390 coordSwizzle->setType(TType(coordBaseType, EvqTemporary, coordFields.size()));
4391
4392 // Extract LOD
4393 TIntermTyped* lodIdx = intermediate.addConstantUnion(coordFields.size(), loc, true);
4394 lodComponent = intermediate.addIndex(EOpIndexDirect, argCoord, lodIdx, loc);
4395 lodComponent->setType(TType(coordBaseType, EvqTemporary, 1));
4396 }
4397
4398 const int numArgs = (int)argAggregate->getSequence().size();
4399 const bool hasOffset = ((!isMS && numArgs == 3) || (isMS && numArgs == 4));
4400
4401 // Create texel fetch
4402 const TOperator fetchOp = (isImage ? EOpImageLoad :
4403 hasOffset ? EOpTextureFetchOffset :
4404 EOpTextureFetch);
4405 TIntermAggregate* txfetch = new TIntermAggregate(fetchOp);
4406
4407 // Build up the fetch
4408 txfetch->getSequence().push_back(argTex);
4409 txfetch->getSequence().push_back(coordSwizzle);
4410
4411 if (isMS) {
4412 // add 2DMS sample index
4413 TIntermTyped* argSampleIdx = argAggregate->getSequence()[2]->getAsTyped();
4414 txfetch->getSequence().push_back(argSampleIdx);
4415 } else if (isBuffer) {
4416 // Nothing else to do for buffers.
4417 } else if (isImage) {
4418 // Nothing else to do for images.
4419 } else {
4420 // MS, buffer, and image forms have no LOD; everything else does.
4421 txfetch->getSequence().push_back(lodComponent);
4422 }
4423
4424 // Obtain offset arg, if there is one.
4425 if (hasOffset) {
4426 const int offsetPos = (isMS ? 3 : 2);
4427 argOffset = argAggregate->getSequence()[offsetPos]->getAsTyped();
4428 txfetch->getSequence().push_back(argOffset);
4429 }
4430
4431 node = convertReturn(txfetch, sampler);
4432
4433 break;
4434 }
4435
4436 case EOpMethodSampleLevel:
4437 {
4438 TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
4439 TIntermTyped* argSamp = argAggregate->getSequence()[1]->getAsTyped();
4440 TIntermTyped* argCoord = argAggregate->getSequence()[2]->getAsTyped();
4441 TIntermTyped* argLod = argAggregate->getSequence()[3]->getAsTyped();
4442 TIntermTyped* argOffset = nullptr;
4443 const TSampler& sampler = argTex->getType().getSampler();
4444
4445 const int numArgs = (int)argAggregate->getSequence().size();
4446
4447 if (numArgs == 5) // offset, if present
4448 argOffset = argAggregate->getSequence()[4]->getAsTyped();
4449
4450 const TOperator textureOp = (argOffset == nullptr ? EOpTextureLod : EOpTextureLodOffset);
4451 TIntermAggregate* txsample = new TIntermAggregate(textureOp);
4452
4453 TIntermAggregate* txcombine = handleSamplerTextureCombine(loc, argTex, argSamp);
4454
4455 txsample->getSequence().push_back(txcombine);
4456 txsample->getSequence().push_back(argCoord);
4457 txsample->getSequence().push_back(argLod);
4458
4459 if (argOffset != nullptr)
4460 txsample->getSequence().push_back(argOffset);
4461
4462 node = convertReturn(txsample, sampler);
4463
4464 break;
4465 }
4466
4467 case EOpMethodGather:
4468 {
4469 TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
4470 TIntermTyped* argSamp = argAggregate->getSequence()[1]->getAsTyped();
4471 TIntermTyped* argCoord = argAggregate->getSequence()[2]->getAsTyped();
4472 TIntermTyped* argOffset = nullptr;
4473
4474 // Offset is optional
4475 if (argAggregate->getSequence().size() > 3)
4476 argOffset = argAggregate->getSequence()[3]->getAsTyped();
4477
4478 const TOperator textureOp = (argOffset == nullptr ? EOpTextureGather : EOpTextureGatherOffset);
4479 TIntermAggregate* txgather = new TIntermAggregate(textureOp);
4480
4481 TIntermAggregate* txcombine = handleSamplerTextureCombine(loc, argTex, argSamp);
4482
4483 txgather->getSequence().push_back(txcombine);
4484 txgather->getSequence().push_back(argCoord);
4485 // The gather channel is implicitly 0 (red) when no component argument is given.
4486
4487 if (argOffset != nullptr)
4488 txgather->getSequence().push_back(argOffset);
4489
4490 txgather->setType(node->getType());
4491 txgather->setLoc(loc);
4492 node = txgather;
4493
4494 break;
4495 }
4496
4497 case EOpMethodGatherRed: // fall through...
4498 case EOpMethodGatherGreen: // ...
4499 case EOpMethodGatherBlue: // ...
4500 case EOpMethodGatherAlpha: // ...
4501 case EOpMethodGatherCmpRed: // ...
4502 case EOpMethodGatherCmpGreen: // ...
4503 case EOpMethodGatherCmpBlue: // ...
4504 case EOpMethodGatherCmpAlpha: // ...
4505 {
4506 int channel = 0; // the channel we are gathering
4507 int cmpValues = 0; // 1 if there is a compare value (handier than a bool below)
4508
4509 switch (op) {
4510 case EOpMethodGatherCmpRed: cmpValues = 1; // fall through
4511 case EOpMethodGatherRed: channel = 0; break;
4512 case EOpMethodGatherCmpGreen: cmpValues = 1; // fall through
4513 case EOpMethodGatherGreen: channel = 1; break;
4514 case EOpMethodGatherCmpBlue: cmpValues = 1; // fall through
4515 case EOpMethodGatherBlue: channel = 2; break;
4516 case EOpMethodGatherCmpAlpha: cmpValues = 1; // fall through
4517 case EOpMethodGatherAlpha: channel = 3; break;
4518 default: assert(0); break;
4519 }
4520
4521 // For now, we have nothing to map the component-wise comparison forms
4522 // to, because neither GLSL nor SPIR-V has such an opcode. Issue an
4523 // unimplemented error instead. Most of the machinery is here if that
4524 // should ever become available. However, red can be passed through
4525 // to OpImageDrefGather. G/B/A cannot, because that opcode does not
4526 // accept a component.
4527 if (cmpValues != 0 && op != EOpMethodGatherCmpRed) {
4528 error(loc, "unimplemented: component-level gather compare", "", "");
4529 return;
4530 }
4531
4532 int arg = 0;
4533
4534 TIntermTyped* argTex = argAggregate->getSequence()[arg++]->getAsTyped();
4535 TIntermTyped* argSamp = argAggregate->getSequence()[arg++]->getAsTyped();
4536 TIntermTyped* argCoord = argAggregate->getSequence()[arg++]->getAsTyped();
4537 TIntermTyped* argOffset = nullptr;
4538 TIntermTyped* argOffsets[4] = { nullptr, nullptr, nullptr, nullptr };
4539 // TIntermTyped* argStatus = nullptr; // TODO: residency
4540 TIntermTyped* argCmp = nullptr;
4541
4542 const TSamplerDim dim = argTex->getType().getSampler().dim;
4543
4544 const int argSize = (int)argAggregate->getSequence().size();
4545 bool hasStatus = (argSize == (5+cmpValues) || argSize == (8+cmpValues));
4546 bool hasOffset1 = false;
4547 bool hasOffset4 = false;
4548
4549 // Sampler argument should be a sampler.
4550 if (argSamp->getType().getBasicType() != EbtSampler) {
4551 error(loc, "expected: sampler type", "", "");
4552 return;
4553 }
4554
4555 // Cmp forms require SamplerComparisonState
4556 if (cmpValues > 0 && ! argSamp->getType().getSampler().isShadow()) {
4557 error(loc, "expected: SamplerComparisonState", "", "");
4558 return;
4559 }
4560
4561 // Only 2D forms can have offsets. Discover if we have 0, 1 or 4 offsets.
4562 if (dim == Esd2D) {
4563 hasOffset1 = (argSize == (4+cmpValues) || argSize == (5+cmpValues));
4564 hasOffset4 = (argSize == (7+cmpValues) || argSize == (8+cmpValues));
4565 }
4566
4567 assert(!(hasOffset1 && hasOffset4));
4568
4569 TOperator textureOp = EOpTextureGather;
4570
4571 // Compare forms have compare value
4572 if (cmpValues != 0)
4573 argCmp = argOffset = argAggregate->getSequence()[arg++]->getAsTyped();
4574
4575 // Some forms have single offset
4576 if (hasOffset1) {
4577 textureOp = EOpTextureGatherOffset; // single offset form
4578 argOffset = argAggregate->getSequence()[arg++]->getAsTyped();
4579 }
4580
4581 // Some forms have 4 gather offsets
4582 if (hasOffset4) {
4583 textureOp = EOpTextureGatherOffsets; // note plural, for 4 offset form
4584 for (int offsetNum = 0; offsetNum < 4; ++offsetNum)
4585 argOffsets[offsetNum] = argAggregate->getSequence()[arg++]->getAsTyped();
4586 }
4587
4588 // Residency status
4589 if (hasStatus) {
4590 // argStatus = argAggregate->getSequence()[arg++]->getAsTyped();
4591 error(loc, "unimplemented: residency status", "", "");
4592 return;
4593 }
4594
4595 TIntermAggregate* txgather = new TIntermAggregate(textureOp);
4596 TIntermAggregate* txcombine = handleSamplerTextureCombine(loc, argTex, argSamp);
4597
4598 TIntermTyped* argChannel = intermediate.addConstantUnion(channel, loc, true);
4599
4600 txgather->getSequence().push_back(txcombine);
4601 txgather->getSequence().push_back(argCoord);
4602
4603 // AST wants an array of 4 offsets, where HLSL has separate args. Here
4604 // we construct an array from the separate args.
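// For example (sketch): GatherRed(samp, coord, o0, o1, o2, o3) becomes roughly the
// equivalent of GLSL's textureGatherOffsets(combined, coord, ivec2[4](o0, o1, o2, o3), 0).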
4605 if (hasOffset4) {
4606 TType arrayType(EbtInt, EvqTemporary, 2);
4607 TArraySizes* arraySizes = new TArraySizes;
4608 arraySizes->addInnerSize(4);
4609 arrayType.transferArraySizes(arraySizes);
4610
4611 TIntermAggregate* initList = new TIntermAggregate(EOpNull);
4612
4613 for (int offsetNum = 0; offsetNum < 4; ++offsetNum)
4614 initList->getSequence().push_back(argOffsets[offsetNum]);
4615
4616 argOffset = addConstructor(loc, initList, arrayType);
4617 }
4618
4619 // Add comparison value if we have one
4620 if (argCmp != nullptr)
4621 txgather->getSequence().push_back(argCmp);
4622
4623 // Add offset (either 1, or an array of 4) if we have one
4624 if (argOffset != nullptr)
4625 txgather->getSequence().push_back(argOffset);
4626
4627 // Add channel value if the sampler is not shadow
4628 if (! argSamp->getType().getSampler().isShadow())
4629 txgather->getSequence().push_back(argChannel);
4630
4631 txgather->setType(node->getType());
4632 txgather->setLoc(loc);
4633 node = txgather;
4634
4635 break;
4636 }
4637
4638 case EOpMethodCalculateLevelOfDetail:
4639 case EOpMethodCalculateLevelOfDetailUnclamped:
4640 {
4641 TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
4642 TIntermTyped* argSamp = argAggregate->getSequence()[1]->getAsTyped();
4643 TIntermTyped* argCoord = argAggregate->getSequence()[2]->getAsTyped();
4644
4645 TIntermAggregate* txquerylod = new TIntermAggregate(EOpTextureQueryLod);
4646
4647 TIntermAggregate* txcombine = handleSamplerTextureCombine(loc, argTex, argSamp);
4648 txquerylod->getSequence().push_back(txcombine);
4649 txquerylod->getSequence().push_back(argCoord);
4650
4651 TIntermTyped* lodComponent = intermediate.addConstantUnion(
4652 op == EOpMethodCalculateLevelOfDetail ? 0 : 1,
4653 loc, true);
4654 TIntermTyped* lodComponentIdx = intermediate.addIndex(EOpIndexDirect, txquerylod, lodComponent, loc);
4655 lodComponentIdx->setType(TType(EbtFloat, EvqTemporary, 1));
4656 node = lodComponentIdx;
4657
4658 break;
4659 }
4660
4661 case EOpMethodGetSamplePosition:
4662 {
4663 // TODO: this entire decomposition exists because there is not yet a way to query
4664 // the sample position directly through SPIR-V. Instead, we return fixed sample
4665 // positions for common cases. *** If the sample positions are set differently,
4666 // this will be wrong. ***
4667
4668 TIntermTyped* argTex = argAggregate->getSequence()[0]->getAsTyped();
4669 TIntermTyped* argSampIdx = argAggregate->getSequence()[1]->getAsTyped();
4670
4671 TIntermAggregate* samplesQuery = new TIntermAggregate(EOpImageQuerySamples);
4672 samplesQuery->getSequence().push_back(argTex);
4673 samplesQuery->setType(TType(EbtUint, EvqTemporary, 1));
4674 samplesQuery->setLoc(loc);
4675
4676 TIntermAggregate* compoundStatement = nullptr;
4677
4678 TVariable* outSampleCount = makeInternalVariable("@sampleCount", TType(EbtUint));
4679 outSampleCount->getWritableType().getQualifier().makeTemporary();
4680 TIntermTyped* compAssign = intermediate.addAssign(EOpAssign, intermediate.addSymbol(*outSampleCount, loc),
4681 samplesQuery, loc);
4682 compoundStatement = intermediate.growAggregate(compoundStatement, compAssign);
4683
4684 TIntermTyped* idxtest[4];
4685
4686 // Create tests against 2, 4, 8, and 16 sample values
4687 int count = 0;
4688 for (int val = 2; val <= 16; val *= 2)
4689 idxtest[count++] =
4690 intermediate.addBinaryNode(EOpEqual,
4691 intermediate.addSymbol(*outSampleCount, loc),
4692 intermediate.addConstantUnion(val, loc),
4693 loc, TType(EbtBool));
4694
4695 const TOperator idxOp = (argSampIdx->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;
4696
4697 // Create index ops into position arrays given sample index.
4698 // TODO: should it be clamped?
4699 TIntermTyped* index[4];
4700 count = 0;
4701 for (int val = 2; val <= 16; val *= 2) {
4702 index[count] = intermediate.addIndex(idxOp, getSamplePosArray(val), argSampIdx, loc);
4703 index[count++]->setType(TType(EbtFloat, EvqTemporary, 2));
4704 }
4705
4706 // Create expression as:
4707 // (sampleCount == 2) ? pos2[idx] :
4708 // (sampleCount == 4) ? pos4[idx] :
4709 // (sampleCount == 8) ? pos8[idx] :
4710 // (sampleCount == 16) ? pos16[idx] : float2(0,0);
4711 TIntermTyped* test =
4712 intermediate.addSelection(idxtest[0], index[0],
4713 intermediate.addSelection(idxtest[1], index[1],
4714 intermediate.addSelection(idxtest[2], index[2],
4715 intermediate.addSelection(idxtest[3], index[3],
4716 getSamplePosArray(1), loc), loc), loc), loc);
4717
4718 compoundStatement = intermediate.growAggregate(compoundStatement, test);
4719 compoundStatement->setOperator(EOpSequence);
4720 compoundStatement->setLoc(loc);
4721 compoundStatement->setType(TType(EbtFloat, EvqTemporary, 2));
4722
4723 node = compoundStatement;
4724
4725 break;
4726 }
4727
4728 case EOpSubpassLoad:
4729 {
4730 const TIntermTyped* argSubpass =
4731 argAggregate ? argAggregate->getSequence()[0]->getAsTyped() :
4732 arguments->getAsTyped();
4733
4734 const TSampler& sampler = argSubpass->getType().getSampler();
4735
4736 // subpass load: the multisample form is overloaded. Here, we convert that to
4737 // the EOpSubpassLoadMS opcode.
4738 if (argAggregate != nullptr && argAggregate->getSequence().size() > 1)
4739 node->getAsOperator()->setOp(EOpSubpassLoadMS);
4740
4741 node = convertReturn(node, sampler);
4742
4743 break;
4744 }
4745
4747 default:
4748 break; // most pass through unchanged
4749 }
4750 }
4751
4752 //
4753 // Decompose geometry shader methods
4754 //
4755 void HlslParseContext::decomposeGeometryMethods(const TSourceLoc& loc, TIntermTyped*& node, TIntermNode* arguments)
4756 {
4757 if (node == nullptr || !node->getAsOperator())
4758 return;
4759
4760 const TOperator op = node->getAsOperator()->getOp();
4761 const TIntermAggregate* argAggregate = arguments ? arguments->getAsAggregate() : nullptr;
4762
4763 switch (op) {
4764 case EOpMethodAppend:
4765 if (argAggregate) {
4766 // Don't emit these for non-GS stage, since we won't have the gsStreamOutput symbol.
4767 if (language != EShLangGeometry) {
4768 node = nullptr;
4769 return;
4770 }
4771
4772 TIntermAggregate* sequence = nullptr;
4773 TIntermAggregate* emit = new TIntermAggregate(EOpEmitVertex);
4774
4775 emit->setLoc(loc);
4776 emit->setType(TType(EbtVoid));
4777
4778 TIntermTyped* data = argAggregate->getSequence()[1]->getAsTyped();
4779
4780 // This will be patched in finalization during finalizeAppendMethods()
4781 sequence = intermediate.growAggregate(sequence, data, loc);
4782 sequence = intermediate.growAggregate(sequence, emit);
4783
4784 sequence->setOperator(EOpSequence);
4785 sequence->setLoc(loc);
4786 sequence->setType(TType(EbtVoid));
4787
4788 gsAppends.push_back({sequence, loc});
4789
4790 node = sequence;
4791 }
4792 break;
4793
4794 case EOpMethodRestartStrip:
4795 {
4796 // Don't emit these for non-GS stage, since we won't have the gsStreamOutput symbol.
4797 if (language != EShLangGeometry) {
4798 node = nullptr;
4799 return;
4800 }
4801
4802 TIntermAggregate* cut = new TIntermAggregate(EOpEndPrimitive);
4803 cut->setLoc(loc);
4804 cut->setType(TType(EbtVoid));
4805 node = cut;
4806 }
4807 break;
4808
4809 default:
4810 break; // most pass through unchanged
4811 }
4812 }
4813
4814 //
4815 // Optionally decompose intrinsics to AST opcodes.
4816 //
4817 void HlslParseContext::decomposeIntrinsic(const TSourceLoc& loc, TIntermTyped*& node, TIntermNode* arguments)
4818 {
4819 // Helper to find image data for image atomics:
4820 // OpImageLoad(image[idx])
4821 // We take the image load apart and add its params to the atomic op aggregate node
4822 const auto imageAtomicParams = [this, &loc, &node](TIntermAggregate* atomic, TIntermTyped* load) {
4823 TIntermAggregate* loadOp = load->getAsAggregate();
4824 if (loadOp == nullptr) {
4825 error(loc, "unknown image type in atomic operation", "", "");
4826 node = nullptr;
4827 return;
4828 }
4829
4830 atomic->getSequence().push_back(loadOp->getSequence()[0]);
4831 atomic->getSequence().push_back(loadOp->getSequence()[1]);
4832 };
4833
4834 // Return true if this is an imageLoad, which we will change to an image atomic.
4835 const auto isImageParam = [](TIntermTyped* image) -> bool {
4836 TIntermAggregate* imageAggregate = image->getAsAggregate();
4837 return imageAggregate != nullptr && imageAggregate->getOp() == EOpImageLoad;
4838 };
4839
4840 const auto lookupBuiltinVariable = [&](const char* name, TBuiltInVariable builtin, TType& type) -> TIntermTyped* {
4841 TSymbol* symbol = symbolTable.find(name);
4842 if (nullptr == symbol) {
4843 type.getQualifier().builtIn = builtin;
4844
4845 TVariable* variable = new TVariable(NewPoolTString(name), type);
4846
4847 symbolTable.insert(*variable);
4848
4849 symbol = symbolTable.find(name);
4850 assert(symbol && "Inserted symbol could not be found!");
4851 }
4852
4853 return intermediate.addSymbol(*(symbol->getAsVariable()), loc);
4854 };
4855
4856 // HLSL intrinsics can be passed through to native AST opcodes, or decomposed here to existing AST
4857 // opcodes for compatibility with existing software stacks.
4858 static const bool decomposeHlslIntrinsics = true;
4859
4860 if (!decomposeHlslIntrinsics || !node || !node->getAsOperator())
4861 return;
4862
4863 const TIntermAggregate* argAggregate = arguments ? arguments->getAsAggregate() : nullptr;
4864 TIntermUnary* fnUnary = node->getAsUnaryNode();
4865 const TOperator op = node->getAsOperator()->getOp();
4866
4867 switch (op) {
4868 case EOpGenMul:
4869 {
4870 // mul(a,b) -> MatrixTimesMatrix, MatrixTimesVector, MatrixTimesScalar, VectorTimesScalar, Dot, Mul
4871 // Since we are treating HLSL rows like GLSL columns (the first matrix indirection),
4872 // we must reverse the operand order here. Hence, arg0 gets sequence[1], etc.
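// For example (sketch): HLSL mul(M, v) arrives with arg0 = v and arg1 = M,
// producing the AST equivalent of GLSL's v * M.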
4873 TIntermTyped* arg0 = argAggregate->getSequence()[1]->getAsTyped();
4874 TIntermTyped* arg1 = argAggregate->getSequence()[0]->getAsTyped();
4875
4876 if (arg0->isVector() && arg1->isVector()) { // vec * vec
4877 node->getAsAggregate()->setOperator(EOpDot);
4878 } else {
4879 node = handleBinaryMath(loc, "mul", EOpMul, arg0, arg1);
4880 }
4881
4882 break;
4883 }
4884
4885 case EOpRcp:
4886 {
4887 // rcp(a) -> 1 / a
4888 TIntermTyped* arg0 = fnUnary->getOperand();
4889 TBasicType type0 = arg0->getBasicType();
4890 TIntermTyped* one = intermediate.addConstantUnion(1, type0, loc, true);
4891 node = handleBinaryMath(loc, "rcp", EOpDiv, one, arg0);
4892
4893 break;
4894 }
4895
4896 case EOpAny: // fall through
4897 case EOpAll:
4898 {
4899 TIntermTyped* typedArg = arguments->getAsTyped();
4900
4901 // HLSL allows float/etc types here, and the SPIR-V opcode requires a bool.
4902 // We'll convert here. Note that for efficiency, we could add a smarter
4903 // decomposition for some type cases, e.g., maybe by decomposing a dot product.
4904 if (typedArg->getType().getBasicType() != EbtBool) {
4905 const TType boolType(EbtBool, EvqTemporary,
4906 typedArg->getVectorSize(),
4907 typedArg->getMatrixCols(),
4908 typedArg->getMatrixRows(),
4909 typedArg->isVector());
4910
4911 typedArg = intermediate.addConversion(EOpConstructBool, boolType, typedArg);
4912 node->getAsUnaryNode()->setOperand(typedArg);
4913 }
4914
4915 break;
4916 }
4917
4918 case EOpSaturate:
4919 {
4920 // saturate(a) -> clamp(a,0,1)
4921 TIntermTyped* arg0 = fnUnary->getOperand();
4922 TBasicType type0 = arg0->getBasicType();
4923 TIntermAggregate* clamp = new TIntermAggregate(EOpClamp);
4924
4925 clamp->getSequence().push_back(arg0);
4926 clamp->getSequence().push_back(intermediate.addConstantUnion(0, type0, loc, true));
4927 clamp->getSequence().push_back(intermediate.addConstantUnion(1, type0, loc, true));
4928 clamp->setLoc(loc);
4929 clamp->setType(node->getType());
4930 clamp->getWritableType().getQualifier().makeTemporary();
4931 node = clamp;
4932
4933 break;
4934 }
4935
4936 case EOpSinCos:
4937 {
4938 // sincos(a,b,c) -> b = sin(a), c = cos(a)
4939 TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped();
4940 TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped();
4941 TIntermTyped* arg2 = argAggregate->getSequence()[2]->getAsTyped();
4942
4943 TIntermTyped* sinStatement = handleUnaryMath(loc, "sin", EOpSin, arg0);
4944 TIntermTyped* cosStatement = handleUnaryMath(loc, "cos", EOpCos, arg0);
4945 TIntermTyped* sinAssign = intermediate.addAssign(EOpAssign, arg1, sinStatement, loc);
4946 TIntermTyped* cosAssign = intermediate.addAssign(EOpAssign, arg2, cosStatement, loc);
4947
4948 TIntermAggregate* compoundStatement = intermediate.makeAggregate(sinAssign, loc);
4949 compoundStatement = intermediate.growAggregate(compoundStatement, cosAssign);
4950 compoundStatement->setOperator(EOpSequence);
4951 compoundStatement->setLoc(loc);
4952 compoundStatement->setType(TType(EbtVoid));
4953
4954 node = compoundStatement;
4955
4956 break;
4957 }
4958
4959 case EOpClip:
4960 {
4961 // clip(a) -> if (any(a<0)) discard;
4962 TIntermTyped* arg0 = fnUnary->getOperand();
4963 TBasicType type0 = arg0->getBasicType();
4964 TIntermTyped* compareNode = nullptr;
4965
4966 // For non-scalars: per experiment with FXC compiler, discard if any component < 0.
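// For example (sketch): clip(float3 v) becomes roughly:
//   if (any(lessThan(v, vec3(0.0)))) discard;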
4967 if (!arg0->isScalar()) {
4968 // component-wise compare: a < 0
4969 TIntermAggregate* less = new TIntermAggregate(EOpLessThan);
4970 less->getSequence().push_back(arg0);
4971 less->setLoc(loc);
4972
4973 // make vec or mat of bool matching dimensions of input
4974 less->setType(TType(EbtBool, EvqTemporary,
4975 arg0->getType().getVectorSize(),
4976 arg0->getType().getMatrixCols(),
4977 arg0->getType().getMatrixRows(),
4978 arg0->getType().isVector()));
4979
4980 // calculate # of components for comparison const
4981 const int constComponentCount =
4982 std::max(arg0->getType().getVectorSize(), 1) *
4983 std::max(arg0->getType().getMatrixCols(), 1) *
4984 std::max(arg0->getType().getMatrixRows(), 1);
4985
4986 TConstUnion zero;
4987 if (arg0->getType().isIntegerDomain())
4988 zero.setDConst(0);
4989 else
4990 zero.setDConst(0.0);
4991 TConstUnionArray zeros(constComponentCount, zero);
4992
4993 less->getSequence().push_back(intermediate.addConstantUnion(zeros, arg0->getType(), loc, true));
4994
4995 compareNode = intermediate.addBuiltInFunctionCall(loc, EOpAny, true, less, TType(EbtBool));
4996 } else {
4997 TIntermTyped* zero;
4998 if (arg0->getType().isIntegerDomain())
4999 zero = intermediate.addConstantUnion(0, loc, true);
5000 else
5001 zero = intermediate.addConstantUnion(0.0, type0, loc, true);
5002 compareNode = handleBinaryMath(loc, "clip", EOpLessThan, arg0, zero);
5003 }
5004
5005 TIntermBranch* killNode = intermediate.addBranch(EOpKill, loc);
5006
5007 node = new TIntermSelection(compareNode, killNode, nullptr);
5008 node->setLoc(loc);
5009
5010 break;
5011 }
5012
5013 case EOpLog10:
5014 {
5015 // log10(a) -> log2(a) * 0.301029995663981 (== 1/log2(10))
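// For example: log10(100) = log2(100) * 0.30103 = 6.64386 * 0.30103 = 2.0.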
5016 TIntermTyped* arg0 = fnUnary->getOperand();
5017 TIntermTyped* log2 = handleUnaryMath(loc, "log2", EOpLog2, arg0);
5018 TIntermTyped* base = intermediate.addConstantUnion(0.301029995663981f, EbtFloat, loc, true);
5019
5020 node = handleBinaryMath(loc, "mul", EOpMul, log2, base);
5021
5022 break;
5023 }
5024
5025 case EOpDst:
5026 {
5027 // dest.x = 1;
5028 // dest.y = src0.y * src1.y;
5029 // dest.z = src0.z;
5030 // dest.w = src1.w;
5031
5032 TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped();
5033 TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped();
5034
5035 TIntermTyped* y = intermediate.addConstantUnion(1, loc, true);
5036 TIntermTyped* z = intermediate.addConstantUnion(2, loc, true);
5037 TIntermTyped* w = intermediate.addConstantUnion(3, loc, true);
5038
5039 TIntermTyped* src0y = intermediate.addIndex(EOpIndexDirect, arg0, y, loc);
5040 TIntermTyped* src1y = intermediate.addIndex(EOpIndexDirect, arg1, y, loc);
5041 TIntermTyped* src0z = intermediate.addIndex(EOpIndexDirect, arg0, z, loc);
5042 TIntermTyped* src1w = intermediate.addIndex(EOpIndexDirect, arg1, w, loc);
5043
5044 TIntermAggregate* dst = new TIntermAggregate(EOpConstructVec4);
5045
5046 dst->getSequence().push_back(intermediate.addConstantUnion(1.0, EbtFloat, loc, true));
5047 dst->getSequence().push_back(handleBinaryMath(loc, "mul", EOpMul, src0y, src1y));
5048 dst->getSequence().push_back(src0z);
5049 dst->getSequence().push_back(src1w);
5050 dst->setType(TType(EbtFloat, EvqTemporary, 4));
5051 dst->setLoc(loc);
5052 node = dst;
5053
5054 break;
5055 }
5056
5057 case EOpInterlockedAdd: // optional last argument (if present) is assigned from return value
5058 case EOpInterlockedMin: // ...
5059 case EOpInterlockedMax: // ...
5060 case EOpInterlockedAnd: // ...
5061 case EOpInterlockedOr: // ...
5062 case EOpInterlockedXor: // ...
5063 case EOpInterlockedExchange: // always has output arg
5064 {
5065 TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped(); // dest
5066 TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped(); // value
5067 TIntermTyped* arg2 = nullptr;
5068
5069 if (argAggregate->getSequence().size() > 2)
5070 arg2 = argAggregate->getSequence()[2]->getAsTyped();
5071
5072 const bool isImage = isImageParam(arg0);
5073 const TOperator atomicOp = mapAtomicOp(loc, op, isImage);
5074 TIntermAggregate* atomic = new TIntermAggregate(atomicOp);
5075 atomic->setType(arg0->getType());
5076 atomic->getWritableType().getQualifier().makeTemporary();
5077 atomic->setLoc(loc);
5078
5079 if (isImage) {
5080 // orig_value = imageAtomicOp(image, loc, data)
5081 imageAtomicParams(atomic, arg0);
5082 atomic->getSequence().push_back(arg1);
5083
5084 if (argAggregate->getSequence().size() > 2) {
5085 node = intermediate.addAssign(EOpAssign, arg2, atomic, loc);
5086 } else {
5087 node = atomic; // no assignment needed, as there was no out var.
5088 }
5089 } else {
5090 // Normal memory variable:
5091 // arg0 = mem, arg1 = data, arg2(optional,out) = orig_value
5092 if (argAggregate->getSequence().size() > 2) {
5093 // optional output param is present. return value goes to arg2.
5094 atomic->getSequence().push_back(arg0);
5095 atomic->getSequence().push_back(arg1);
5096
5097 node = intermediate.addAssign(EOpAssign, arg2, atomic, loc);
5098 } else {
5099 // Set the matching operator. Since output is absent, this is all we need to do.
5100 node->getAsAggregate()->setOperator(atomicOp);
5101 node->setType(atomic->getType());
5102 }
5103 }
5104
5105 break;
5106 }
5107
5108 case EOpInterlockedCompareExchange:
5109 {
5110 TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped(); // dest
5111 TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped(); // cmp
5112 TIntermTyped* arg2 = argAggregate->getSequence()[2]->getAsTyped(); // value
5113 TIntermTyped* arg3 = argAggregate->getSequence()[3]->getAsTyped(); // orig
5114
5115 const bool isImage = isImageParam(arg0);
5116 TIntermAggregate* atomic = new TIntermAggregate(mapAtomicOp(loc, op, isImage));
5117 atomic->setLoc(loc);
5118 atomic->setType(arg2->getType());
5119 atomic->getWritableType().getQualifier().makeTemporary();
5120
5121 if (isImage) {
5122 imageAtomicParams(atomic, arg0);
5123 } else {
5124 atomic->getSequence().push_back(arg0);
5125 }
5126
5127 atomic->getSequence().push_back(arg1);
5128 atomic->getSequence().push_back(arg2);
5129 node = intermediate.addAssign(EOpAssign, arg3, atomic, loc);
5130
5131 break;
5132 }
5133
5134 case EOpEvaluateAttributeSnapped:
5135 {
5136 // SPIR-V InterpolateAtOffset uses float vec2 offset in pixels
5137 // HLSL uses int2 offset on a 16x16 grid in [-8..7] on x & y:
5138 // iU = (iU<<28)>>28
5139 // fU = ((float)iU)/16
5140 // Targets might handle this natively, in which case they can disable
5141 // decompositions.
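// For example: an offset component of 7 maps to 7/16 = 0.4375, while -8
// (low four bits 0b1000, recovered by the shifts below) maps to -8/16 = -0.5.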
5142
5143 TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped(); // value
5144 TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped(); // offset
5145
5146 TIntermTyped* i28 = intermediate.addConstantUnion(28, loc, true);
5147 TIntermTyped* iU = handleBinaryMath(loc, ">>", EOpRightShift,
5148 handleBinaryMath(loc, "<<", EOpLeftShift, arg1, i28),
5149 i28);
5150
5151 TIntermTyped* recip16 = intermediate.addConstantUnion((1.0/16.0), EbtFloat, loc, true);
5152 TIntermTyped* floatOffset = handleBinaryMath(loc, "mul", EOpMul,
5153 intermediate.addConversion(EOpConstructFloat,
5154 TType(EbtFloat, EvqTemporary, 2), iU),
5155 recip16);
5156
5157 TIntermAggregate* interp = new TIntermAggregate(EOpInterpolateAtOffset);
5158 interp->getSequence().push_back(arg0);
5159 interp->getSequence().push_back(floatOffset);
5160 interp->setLoc(loc);
5161 interp->setType(arg0->getType());
5162 interp->getWritableType().getQualifier().makeTemporary();
5163
5164 node = interp;
5165
5166 break;
5167 }
5168
5169 case EOpLit:
5170 {
5171 TIntermTyped* n_dot_l = argAggregate->getSequence()[0]->getAsTyped();
5172 TIntermTyped* n_dot_h = argAggregate->getSequence()[1]->getAsTyped();
5173 TIntermTyped* m = argAggregate->getSequence()[2]->getAsTyped();
5174
5175 TIntermAggregate* dst = new TIntermAggregate(EOpConstructVec4);
5176
5177 // Ambient
5178 dst->getSequence().push_back(intermediate.addConstantUnion(1.0, EbtFloat, loc, true));
5179
5180 // Diffuse:
5181 TIntermTyped* zero = intermediate.addConstantUnion(0.0, EbtFloat, loc, true);
5182 TIntermAggregate* diffuse = new TIntermAggregate(EOpMax);
5183 diffuse->getSequence().push_back(n_dot_l);
5184 diffuse->getSequence().push_back(zero);
5185 diffuse->setLoc(loc);
5186 diffuse->setType(TType(EbtFloat));
5187 dst->getSequence().push_back(diffuse);
5188
5189 // Specular:
5190 TIntermAggregate* min_ndot = new TIntermAggregate(EOpMin);
5191 min_ndot->getSequence().push_back(n_dot_l);
5192 min_ndot->getSequence().push_back(n_dot_h);
5193 min_ndot->setLoc(loc);
5194 min_ndot->setType(TType(EbtFloat));
5195
5196 TIntermTyped* compare = handleBinaryMath(loc, "<", EOpLessThan, min_ndot, zero);
5197 TIntermTyped* n_dot_h_m = handleBinaryMath(loc, "mul", EOpMul, n_dot_h, m); // n_dot_h * m
5198
5199 dst->getSequence().push_back(intermediate.addSelection(compare, zero, n_dot_h_m, loc));
5200
5201 // One:
5202 dst->getSequence().push_back(intermediate.addConstantUnion(1.0, EbtFloat, loc, true));
5203
5204 dst->setLoc(loc);
5205 dst->setType(TType(EbtFloat, EvqTemporary, 4));
5206 node = dst;
5207 break;
5208 }
5209
5210 case EOpAsDouble:
5211 {
5212 // asdouble accepts two 32-bit ints. We can use EOpUint64BitsToDouble, but must
5213 // first construct a uint64.
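// For example (sketch): asdouble(lowBits, highBits) becomes roughly a
// bitcast of uvec2(lowBits, highBits) to double (OpBitcast in SPIR-V terms).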
5214 TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped();
5215 TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped();
5216
5217 if (arg0->getType().isVector()) { // TODO: ...
5218 error(loc, "double2 conversion not implemented", "asdouble", "");
5219 break;
5220 }
5221
5222 TIntermAggregate* uint64 = new TIntermAggregate(EOpConstructUVec2);
5223
5224 uint64->getSequence().push_back(arg0);
5225 uint64->getSequence().push_back(arg1);
5226 uint64->setType(TType(EbtUint, EvqTemporary, 2)); // convert 2 uints to a uint2
5227 uint64->setLoc(loc);
5228
5229 // bitcast uint2 to a double
5230 TIntermTyped* convert = new TIntermUnary(EOpUint64BitsToDouble);
5231 convert->getAsUnaryNode()->setOperand(uint64);
5232 convert->setLoc(loc);
5233 convert->setType(TType(EbtDouble, EvqTemporary));
5234 node = convert;
5235
5236 break;
5237 }
5238
5239 case EOpF16tof32:
5240 {
5241 // Input: uvecN with the low 16 bits of each component holding a float16. Convert to float32.
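// For example (sketch): for a uint2 u, the result is roughly the equivalent of
//   vec2(unpackHalf2x16(u.x).x, unpackHalf2x16(u.y).x)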
5242 TIntermTyped* argValue = node->getAsUnaryNode()->getOperand();
5243 TIntermTyped* zero = intermediate.addConstantUnion(0, loc, true);
5244 const int vecSize = argValue->getType().getVectorSize();
5245
5246 TOperator constructOp = EOpNull;
5247 switch (vecSize) {
5248 case 1: constructOp = EOpNull; break; // direct use, no construct needed
5249 case 2: constructOp = EOpConstructVec2; break;
5250 case 3: constructOp = EOpConstructVec3; break;
5251 case 4: constructOp = EOpConstructVec4; break;
5252 default: assert(0); break;
5253 }
5254
5255 // For scalar case, we don't need to construct another type.
5256 TIntermAggregate* result = (vecSize > 1) ? new TIntermAggregate(constructOp) : nullptr;
5257
5258 if (result) {
5259 result->setType(TType(EbtFloat, EvqTemporary, vecSize));
5260 result->setLoc(loc);
5261 }
5262
5263 for (int idx = 0; idx < vecSize; ++idx) {
5264 TIntermTyped* idxConst = intermediate.addConstantUnion(idx, loc, true);
5265 TIntermTyped* component = argValue->getType().isVector() ?
5266 intermediate.addIndex(EOpIndexDirect, argValue, idxConst, loc) : argValue;
5267
5268 if (component != argValue)
5269 component->setType(TType(argValue->getBasicType(), EvqTemporary));
5270
5271 TIntermTyped* unpackOp = new TIntermUnary(EOpUnpackHalf2x16);
5272 unpackOp->setType(TType(EbtFloat, EvqTemporary, 2));
5273 unpackOp->getAsUnaryNode()->setOperand(component);
5274 unpackOp->setLoc(loc);
5275
5276 TIntermTyped* lowOrder = intermediate.addIndex(EOpIndexDirect, unpackOp, zero, loc);
5277
5278 if (result != nullptr) {
5279 result->getSequence().push_back(lowOrder);
5280 node = result;
5281 } else {
5282 node = lowOrder;
5283 }
5284 }
5285
5286 break;
5287 }
5288
5289 case EOpF32tof16:
5290 {
5291 // Input: floatN, converted to a 16-bit float in the low-order bits of each component of a uintN.
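// For example (sketch): for a scalar float f, the result is roughly the
// equivalent of packHalf2x16(vec2(f, 0.0)).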
5292 TIntermTyped* argValue = node->getAsUnaryNode()->getOperand();
5293
5294 TIntermTyped* zero = intermediate.addConstantUnion(0.0, EbtFloat, loc, true);
5295 const int vecSize = argValue->getType().getVectorSize();
5296
5297 TOperator constructOp = EOpNull;
5298 switch (vecSize) {
5299 case 1: constructOp = EOpNull; break; // direct use, no construct needed
5300 case 2: constructOp = EOpConstructUVec2; break;
5301 case 3: constructOp = EOpConstructUVec3; break;
5302 case 4: constructOp = EOpConstructUVec4; break;
5303 default: assert(0); break;
5304 }
5305
5306 // For scalar case, we don't need to construct another type.
5307 TIntermAggregate* result = (vecSize > 1) ? new TIntermAggregate(constructOp) : nullptr;
5308
5309 if (result) {
5310 result->setType(TType(EbtUint, EvqTemporary, vecSize));
5311 result->setLoc(loc);
5312 }
5313
5314 for (int idx = 0; idx < vecSize; ++idx) {
5315 TIntermTyped* idxConst = intermediate.addConstantUnion(idx, loc, true);
5316 TIntermTyped* component = argValue->getType().isVector() ?
5317 intermediate.addIndex(EOpIndexDirect, argValue, idxConst, loc) : argValue;
5318
5319 if (component != argValue)
5320 component->setType(TType(argValue->getBasicType(), EvqTemporary));
5321
5322 TIntermAggregate* vec2ComponentAndZero = new TIntermAggregate(EOpConstructVec2);
5323 vec2ComponentAndZero->getSequence().push_back(component);
5324 vec2ComponentAndZero->getSequence().push_back(zero);
5325 vec2ComponentAndZero->setType(TType(EbtFloat, EvqTemporary, 2));
5326 vec2ComponentAndZero->setLoc(loc);
5327
5328 TIntermTyped* packOp = new TIntermUnary(EOpPackHalf2x16);
5329 packOp->getAsUnaryNode()->setOperand(vec2ComponentAndZero);
5330 packOp->setLoc(loc);
5331 packOp->setType(TType(EbtUint, EvqTemporary));
5332
5333 if (result != nullptr) {
5334 result->getSequence().push_back(packOp);
5335 node = result;
5336 } else {
5337 node = packOp;
5338 }
5339 }
5340
5341 break;
5342 }
5343
5344 case EOpD3DCOLORtoUBYTE4:
5345 {
5346 // ivec4 ( x.zyxw * 255.001953 );
5347 TIntermTyped* arg0 = node->getAsUnaryNode()->getOperand();
5348 TSwizzleSelectors<TVectorSelector> selectors;
5349 selectors.push_back(2);
5350 selectors.push_back(1);
5351 selectors.push_back(0);
5352 selectors.push_back(3);
5353 TIntermTyped* swizzleIdx = intermediate.addSwizzle(selectors, loc);
5354 TIntermTyped* swizzled = intermediate.addIndex(EOpVectorSwizzle, arg0, swizzleIdx, loc);
5355 swizzled->setType(arg0->getType());
5356 swizzled->getWritableType().getQualifier().makeTemporary();
5357
5358 TIntermTyped* conversion = intermediate.addConstantUnion(255.001953f, EbtFloat, loc, true);
5359 TIntermTyped* rangeConverted = handleBinaryMath(loc, "mul", EOpMul, conversion, swizzled);
5360 rangeConverted->setType(arg0->getType());
5361 rangeConverted->getWritableType().getQualifier().makeTemporary();
5362
5363 node = intermediate.addConversion(EOpConstructInt, TType(EbtInt, EvqTemporary, 4), rangeConverted);
5364 node->setLoc(loc);
5365 node->setType(TType(EbtInt, EvqTemporary, 4));
5366 break;
5367 }
5368
5369 case EOpIsFinite:
5370 {
5371 // Since OpIsFinite in SPIR-V is only supported with the Kernel capability, we translate
5372 // it to !isnan && !isinf
5373
5374 TIntermTyped* arg0 = node->getAsUnaryNode()->getOperand();
5375
5376 // We'll make a temporary in case the RHS is complex.
5377 TVariable* tempArg = makeInternalVariable("@finitetmp", arg0->getType());
5378 tempArg->getWritableType().getQualifier().makeTemporary();
5379
5380 TIntermTyped* tmpArgAssign = intermediate.addAssign(EOpAssign,
5381 intermediate.addSymbol(*tempArg, loc),
5382 arg0, loc);
5383
5384 TIntermAggregate* compoundStatement = intermediate.makeAggregate(tmpArgAssign, loc);
5385
5386 const TType boolType(EbtBool, EvqTemporary, arg0->getVectorSize(), arg0->getMatrixCols(),
5387 arg0->getMatrixRows());
5388
5389 TIntermTyped* isnan = handleUnaryMath(loc, "isnan", EOpIsNan, intermediate.addSymbol(*tempArg, loc));
5390 isnan->setType(boolType);
5391
5392 TIntermTyped* notnan = handleUnaryMath(loc, "!", EOpLogicalNot, isnan);
5393 notnan->setType(boolType);
5394
5395 TIntermTyped* isinf = handleUnaryMath(loc, "isinf", EOpIsInf, intermediate.addSymbol(*tempArg, loc));
5396 isinf->setType(boolType);
5397
5398 TIntermTyped* notinf = handleUnaryMath(loc, "!", EOpLogicalNot, isinf);
5399 notinf->setType(boolType);
5400
5401 TIntermTyped* andNode = handleBinaryMath(loc, "and", EOpLogicalAnd, notnan, notinf);
5402 andNode->setType(boolType);
5403
5404 compoundStatement = intermediate.growAggregate(compoundStatement, andNode);
5405 compoundStatement->setOperator(EOpSequence);
5406 compoundStatement->setLoc(loc);
5407 compoundStatement->setType(boolType);
5408
5409 node = compoundStatement;
5410
5411 break;
5412 }
5413 case EOpWaveGetLaneCount:
5414 {
5415 // Mapped to the gl_SubgroupSize builtin. (We prepend @ to the symbol
5416 // so that it inhabits the symbol table, but has a user-invalid name,
5417 // in case some source HLSL also defined the symbol.)
5418 TType type(EbtUint, EvqVaryingIn);
5419 node = lookupBuiltinVariable("@gl_SubgroupSize", EbvSubgroupSize2, type);
5420 break;
5421 }
5422 case EOpWaveGetLaneIndex:
5423 {
5424 // Mapped to the gl_SubgroupInvocationID builtin. (We prepend @ to the
5425 // symbol so that it inhabits the symbol table, but has a
5426 // user-invalid name, in case some source HLSL also defined the
5427 // symbol.)
5428 TType type(EbtUint, EvqVaryingIn);
5429 node = lookupBuiltinVariable("@gl_SubgroupInvocationID", EbvSubgroupInvocation2, type);
5430 break;
5431 }
5432 case EOpWaveActiveCountBits:
5433 {
5434 // Mapped to subgroupBallotBitCount(subgroupBallot()) builtin
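// For example (sketch): WaveActiveCountBits(expr) becomes roughly
// subgroupBallotBitCount(subgroupBallot(expr)).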
5435
5436 // uvec4 type.
5437 TType uvec4Type(EbtUint, EvqTemporary, 4);
5438
5439 // Get the uvec4 return from subgroupBallot().
5440 TIntermTyped* res = intermediate.addBuiltInFunctionCall(loc,
5441 EOpSubgroupBallot, true, arguments, uvec4Type);
5442
5443 // uint type.
5444 TType uintType(EbtUint, EvqTemporary);
5445
5446 node = intermediate.addBuiltInFunctionCall(loc,
5447 EOpSubgroupBallotBitCount, true, res, uintType);
5448
5449 break;
5450 }
5451 case EOpWavePrefixCountBits:
5452 {
5453 // Mapped to subgroupBallotExclusiveBitCount(subgroupBallot())
5454 // builtin
5455
5456 // uvec4 type.
5457 TType uvec4Type(EbtUint, EvqTemporary, 4);
5458
5459 // Get the uvec4 return from subgroupBallot().
5460 TIntermTyped* res = intermediate.addBuiltInFunctionCall(loc,
5461 EOpSubgroupBallot, true, arguments, uvec4Type);
5462
5463 // uint type.
5464 TType uintType(EbtUint, EvqTemporary);
5465
5466 node = intermediate.addBuiltInFunctionCall(loc,
5467 EOpSubgroupBallotExclusiveBitCount, true, res, uintType);
5468
5469 break;
5470 }
5471
5472 default:
5473 break; // most pass through unchanged
5474 }
5475 }
5476
5477 //
5478 // Handle seeing function call syntax in the grammar, which could be any of
5479 // - .length() method
5480 // - constructor
5481 // - a call to a built-in function mapped to an operator
5482 // - a call to a built-in function that will remain a function call (e.g., texturing)
5483 // - user function
5484 // - subroutine call (not implemented yet)
5485 //
5486 TIntermTyped* HlslParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermTyped* arguments)
5487 {
5488 TIntermTyped* result = nullptr;
5489
5490 TOperator op = function->getBuiltInOp();
5491 if (op != EOpNull) {
5492 //
5493 // Then this should be a constructor.
5494 // Don't go through the symbol table for constructors.
5495 // Their parameters will be verified algorithmically.
5496 //
5497 TType type(EbtVoid); // use this to get the type back
5498 if (! constructorError(loc, arguments, *function, op, type)) {
5499 //
5500 // It's a constructor, of type 'type'.
5501 //
5502 result = handleConstructor(loc, arguments, type);
5503 if (result == nullptr) {
5504 error(loc, "cannot construct with these arguments", type.getCompleteString().c_str(), "");
5505 return nullptr;
5506 }
5507 }
5508 } else {
5509 //
5510 // Find it in the symbol table.
5511 //
5512 const TFunction* fnCandidate = nullptr;
5513 bool builtIn = false;
5514 int thisDepth = 0;
5515
5516 // For mat mul, the situation is unusual: we have to compare vector sizes to mat row or col sizes,
5517 // and clamp the opposite arg. Since that's complex, we farm it off to a separate method.
5518 // It doesn't naturally fall out of processing an argument at a time in isolation.
5519 if (function->getName() == "mul")
5520 addGenMulArgumentConversion(loc, *function, arguments);
5521
5522 TIntermAggregate* aggregate = arguments ? arguments->getAsAggregate() : nullptr;
5523
5524 // TODO: this needs improvement: there's no way at present to look up a signature in
5525 // the symbol table for an arbitrary type. This is a temporary hack until that ability exists.
5526 // It will have false positives, since it doesn't check arg counts or types.
5527 if (arguments) {
5528 // Check if first argument is struct buffer type. It may be an aggregate or a symbol, so we
5529 // look for either case.
5530
5531 TIntermTyped* arg0 = nullptr;
5532
5533 if (aggregate && aggregate->getSequence().size() > 0 && aggregate->getSequence()[0])
5534 arg0 = aggregate->getSequence()[0]->getAsTyped();
5535 else if (arguments->getAsSymbolNode())
5536 arg0 = arguments->getAsSymbolNode();
5537
5538 if (arg0 != nullptr && isStructBufferType(arg0->getType())) {
5539 static const int methodPrefixSize = sizeof(BUILTIN_PREFIX)-1;
5540
5541 if (function->getName().length() > methodPrefixSize &&
5542 isStructBufferMethod(function->getName().substr(methodPrefixSize))) {
5543 const TString mangle = function->getName() + "(";
5544 TSymbol* symbol = symbolTable.find(mangle, &builtIn);
5545
5546 if (symbol)
5547 fnCandidate = symbol->getAsFunction();
5548 }
5549 }
5550 }
5551
5552 if (fnCandidate == nullptr)
5553 fnCandidate = findFunction(loc, *function, builtIn, thisDepth, arguments);
5554
5555 if (fnCandidate) {
5556 // This is a declared function that might map to
5557 // - a built-in operator,
5558 // - a built-in function not mapped to an operator, or
5559 // - a user function.
5560
5561 // turn an implicit member-function resolution into an explicit call
5562 TString callerName;
5563 if (thisDepth == 0)
5564 callerName = fnCandidate->getMangledName();
5565 else {
5566 // get the explicit (full) name of the function
5567 callerName = currentTypePrefix[currentTypePrefix.size() - thisDepth];
5568 callerName += fnCandidate->getMangledName();
5569 // insert the implicit calling argument
5570 pushFrontArguments(intermediate.addSymbol(*getImplicitThis(thisDepth)), arguments);
5571 }
5572
5573 // Convert 'in' arguments, so that types match.
5574 // However, skip those that need expansion; that is covered next.
5575 if (arguments)
5576 addInputArgumentConversions(*fnCandidate, arguments);
5577
5578 // Expand arguments. Some arguments must physically expand to a different set
5579 // than what the shader declared and passes.
5580 if (arguments && !builtIn)
5581 expandArguments(loc, *fnCandidate, arguments);
5582
5583 // Expansion may have changed the form of arguments
5584 aggregate = arguments ? arguments->getAsAggregate() : nullptr;
5585
5586 op = fnCandidate->getBuiltInOp();
5587 if (builtIn && op != EOpNull) {
5588 // SM 4.0 and above guarantees roundEven semantics for round()
5589 if (!hlslDX9Compatible() && op == EOpRound)
5590 op = EOpRoundEven;
5591
5592 // A function call mapped to a built-in operation.
5593 result = intermediate.addBuiltInFunctionCall(loc, op, fnCandidate->getParamCount() == 1, arguments,
5594 fnCandidate->getType());
5595 if (result == nullptr) {
5596 error(arguments->getLoc(), " wrong operand type", "Internal Error",
5597 "built in unary operator function. Type: %s",
5598 static_cast<TIntermTyped*>(arguments)->getCompleteString().c_str());
5599 } else if (result->getAsOperator()) {
5600 builtInOpCheck(loc, *fnCandidate, *result->getAsOperator());
5601 }
5602 } else {
5603 // This is a function call not mapped to built-in operator.
5604 // It could still be a built-in function, but only if PureOperatorBuiltins == false.
5605 result = intermediate.setAggregateOperator(arguments, EOpFunctionCall, fnCandidate->getType(), loc);
5606 TIntermAggregate* call = result->getAsAggregate();
5607 call->setName(callerName);
5608
5609 // this is how we know whether the given function is a built-in function or a user-defined function
5610 // if builtIn == false, it's a userDefined -> could be an overloaded built-in function also
5611 // if builtIn == true, it's definitely a built-in function with EOpNull
5612 if (! builtIn) {
5613 call->setUserDefined();
5614 intermediate.addToCallGraph(infoSink, currentCaller, callerName);
5615 }
5616 }
5617
5618 // for decompositions, since we want to operate on the function node, not the aggregate holding
5619 // output conversions.
5620 const TIntermTyped* fnNode = result;
5621
5622 decomposeStructBufferMethods(loc, result, arguments); // HLSL->AST struct buffer method decompositions
5623 decomposeIntrinsic(loc, result, arguments); // HLSL->AST intrinsic decompositions
5624 decomposeSampleMethods(loc, result, arguments); // HLSL->AST sample method decompositions
5625 decomposeGeometryMethods(loc, result, arguments); // HLSL->AST geometry method decompositions
5626
5627 // Create the qualifier list, carried in the AST for the call.
5628 // Because some arguments expand to multiple arguments, the qualifier list will
5629 // be longer than the formal parameter list.
5630 if (result == fnNode && result->getAsAggregate()) {
5631 TQualifierList& qualifierList = result->getAsAggregate()->getQualifierList();
5632 for (int i = 0; i < fnCandidate->getParamCount(); ++i) {
5633 TStorageQualifier qual = (*fnCandidate)[i].type->getQualifier().storage;
5634 if (hasStructBuffCounter(*(*fnCandidate)[i].type)) {
5635 // add buffer and counter buffer argument qualifier
5636 qualifierList.push_back(qual);
5637 qualifierList.push_back(qual);
5638 } else if (shouldFlatten(*(*fnCandidate)[i].type, (*fnCandidate)[i].type->getQualifier().storage,
5639 true)) {
5640 // add structure member expansion
5641 for (int memb = 0; memb < (int)(*fnCandidate)[i].type->getStruct()->size(); ++memb)
5642 qualifierList.push_back(qual);
5643 } else {
5644 // Normal 1:1 case
5645 qualifierList.push_back(qual);
5646 }
5647 }
5648 }
5649
            // Convert 'out' arguments.  If it was a constant folded built-in, it won't be an aggregate anymore.
            // Built-ins with a single argument aren't called with an aggregate, but they also don't have an output.
            // Also, build the qualifier list for user function calls, which are always called with an aggregate.
            // We don't do this if there has been a decomposition, which will have added its own conversions
            // for output parameters.
            if (result == fnNode && result->getAsAggregate())
                result = addOutputArgumentConversions(*fnCandidate, *result->getAsOperator());
        }
    }

    // generic error recovery
    // TODO: simplification: localize all the error recoveries that look like this, taking type into account to
    // reduce cascades
    if (result == nullptr)
        result = intermediate.addConstantUnion(0.0, EbtFloat, loc);

    return result;
}

// An initial argument list is difficult: it can be null, or a single node,
// or an aggregate if more than one argument.  Add one to the front, maintaining
// this lack of uniformity.
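// For example, this is used by handleFunctionCall() to prepend the implicit 'this'
// argument to a member-function call: f(a, b) effectively becomes f(this, a, b).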
void HlslParseContext::pushFrontArguments(TIntermTyped* front, TIntermTyped*& arguments)
{
    if (arguments == nullptr)
        arguments = front;
    else if (arguments->getAsAggregate() != nullptr)
        arguments->getAsAggregate()->getSequence().insert(arguments->getAsAggregate()->getSequence().begin(), front);
    else
        arguments = intermediate.growAggregate(front, arguments);
}

//
// HLSL allows mismatched dimensions on vec*mat, mat*vec, vec*vec, and mat*mat.  This is a
// situation not well suited to resolution in intrinsic selection, but we can do so here, since we
// can look at both arguments and insert explicit shape changes if required.
//
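// For example (illustrative), mul(float3, float4x4) is legal HLSL: the matrix is
// truncated so the inner dimensions agree, and the mismatch warning below is issued.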
void HlslParseContext::addGenMulArgumentConversion(const TSourceLoc& loc, TFunction& call, TIntermTyped*& args)
{
    TIntermAggregate* argAggregate = args ? args->getAsAggregate() : nullptr;

    if (argAggregate == nullptr || argAggregate->getSequence().size() != 2) {
        // It really ought to have two arguments.
        error(loc, "expected: mul arguments", "", "");
        return;
    }

    TIntermTyped* arg0 = argAggregate->getSequence()[0]->getAsTyped();
    TIntermTyped* arg1 = argAggregate->getSequence()[1]->getAsTyped();

    if (arg0->isVector() && arg1->isVector()) {
        // For:
        //    vec * vec: it's handled during intrinsic selection, so while we could do it here,
        //    we can also ignore it, which is easier.
    } else if (arg0->isVector() && arg1->isMatrix()) {
        // vec * mat: we clamp the vec if the mat col is smaller, else clamp the mat col.
        if (arg0->getVectorSize() < arg1->getMatrixCols()) {
            // vec is smaller, so truncate larger mat dimension
            const TType truncType(arg1->getBasicType(), arg1->getQualifier().storage, arg1->getQualifier().precision,
                                  0, arg0->getVectorSize(), arg1->getMatrixRows());
            arg1 = addConstructor(loc, arg1, truncType);
        } else if (arg0->getVectorSize() > arg1->getMatrixCols()) {
            // vec is larger, so truncate vec to mat size
            const TType truncType(arg0->getBasicType(), arg0->getQualifier().storage, arg0->getQualifier().precision,
                                  arg1->getMatrixCols());
            arg0 = addConstructor(loc, arg0, truncType);
        }
    } else if (arg0->isMatrix() && arg1->isVector()) {
        // mat * vec: we clamp the vec if the mat row is smaller, else clamp the mat row.
        if (arg1->getVectorSize() < arg0->getMatrixRows()) {
            // vec is smaller, so truncate larger mat dimension
            const TType truncType(arg0->getBasicType(), arg0->getQualifier().storage, arg0->getQualifier().precision,
                                  0, arg0->getMatrixCols(), arg1->getVectorSize());
            arg0 = addConstructor(loc, arg0, truncType);
        } else if (arg1->getVectorSize() > arg0->getMatrixRows()) {
            // vec is larger, so truncate vec to mat size
            const TType truncType(arg1->getBasicType(), arg1->getQualifier().storage, arg1->getQualifier().precision,
                                  arg0->getMatrixRows());
            arg1 = addConstructor(loc, arg1, truncType);
        }
    } else if (arg0->isMatrix() && arg1->isMatrix()) {
        // mat * mat: we clamp the smaller inner dimension to match the other matrix size.
        // Remember, HLSL Mrc = GLSL/SPIRV Mcr.
        if (arg0->getMatrixRows() > arg1->getMatrixCols()) {
            const TType truncType(arg0->getBasicType(), arg0->getQualifier().storage, arg0->getQualifier().precision,
                                  0, arg0->getMatrixCols(), arg1->getMatrixCols());
            arg0 = addConstructor(loc, arg0, truncType);
        } else if (arg0->getMatrixRows() < arg1->getMatrixCols()) {
            const TType truncType(arg1->getBasicType(), arg1->getQualifier().storage, arg1->getQualifier().precision,
                                  0, arg0->getMatrixRows(), arg1->getMatrixRows());
            arg1 = addConstructor(loc, arg1, truncType);
        }
    } else {
        // It's something with scalars: we'll just leave it alone.  Function selection will handle it
        // downstream.
    }

    // Warn if we altered one of the arguments
    if (arg0 != argAggregate->getSequence()[0] || arg1 != argAggregate->getSequence()[1])
        warn(loc, "mul() matrix size mismatch", "", "");

    // Put arguments back.  (They might be unchanged, in which case this is harmless).
    argAggregate->getSequence()[0] = arg0;
    argAggregate->getSequence()[1] = arg1;

    call[0].type = &arg0->getWritableType();
    call[1].type = &arg1->getWritableType();
}

//
// Add any needed implicit conversions for function-call arguments to input parameters.
//
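// For example, passing an int argument to a float parameter gets a conversion node
// added above the argument, and any scalar-to-vector shape difference is handled by
// the uniform-shape conversion added after it.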
void HlslParseContext::addInputArgumentConversions(const TFunction& function, TIntermTyped*& arguments)
{
    TIntermAggregate* aggregate = arguments->getAsAggregate();

    // Replace a single argument with a single argument.
    const auto setArg = [&](int paramNum, TIntermTyped* arg) {
        if (function.getParamCount() == 1)
            arguments = arg;
        else {
            if (aggregate == nullptr)
                arguments = arg;
            else
                aggregate->getSequence()[paramNum] = arg;
        }
    };

    // Process each argument's conversion
    for (int param = 0; param < function.getParamCount(); ++param) {
        if (! function[param].type->getQualifier().isParamInput())
            continue;

        // At this early point there is a slight ambiguity between whether an aggregate 'arguments'
        // is the single argument itself or its children are the arguments.  Only one argument
        // means take 'arguments' itself as the one argument.
        TIntermTyped* arg = function.getParamCount() == 1
                                ? arguments->getAsTyped()
                                : (aggregate ?
                                       aggregate->getSequence()[param]->getAsTyped() :
                                       arguments->getAsTyped());
        if (*function[param].type != arg->getType()) {
            // In-qualified arguments just need an extra node added above the argument to
            // convert to the correct type.
            TIntermTyped* convArg = intermediate.addConversion(EOpFunctionCall, *function[param].type, arg);
            if (convArg != nullptr)
                convArg = intermediate.addUniShapeConversion(EOpFunctionCall, *function[param].type, convArg);
            if (convArg != nullptr)
                setArg(param, convArg);
            else
                error(arg->getLoc(), "cannot convert input argument, argument", "", "%d", param);
        } else {
            if (wasFlattened(arg)) {
                // If both formal and calling arg are to be flattened, leave that to argument
                // expansion, not conversion.
                if (!shouldFlatten(*function[param].type, function[param].type->getQualifier().storage, true)) {
                    // Will make a two-level subtree.
                    // The deepest will copy member-by-member to build the structure to pass.
                    // The level above that will be a two-operand EOpComma sequence that follows the copy by the
                    // object itself.
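                    // That is, the argument becomes: (aggShadow = arg, aggShadow).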
                    TVariable* internalAggregate = makeInternalVariable("aggShadow", *function[param].type);
                    internalAggregate->getWritableType().getQualifier().makeTemporary();
                    TIntermSymbol* internalSymbolNode = new TIntermSymbol(internalAggregate->getUniqueId(),
                                                                          internalAggregate->getName(),
                                                                          internalAggregate->getType());
                    internalSymbolNode->setLoc(arg->getLoc());
                    // This makes the deepest level, the member-wise copy
                    TIntermAggregate* assignAgg = handleAssign(arg->getLoc(), EOpAssign,
                                                               internalSymbolNode, arg)->getAsAggregate();

                    // Now, pair that with the resulting aggregate.
                    assignAgg = intermediate.growAggregate(assignAgg, internalSymbolNode, arg->getLoc());
                    assignAgg->setOperator(EOpComma);
                    assignAgg->setType(internalAggregate->getType());
                    setArg(param, assignAgg);
                }
            }
        }
    }
}

//
// Add any needed implicit expansion of calling arguments from what the shader listed to what's
// internally needed for the AST (given the constraints downstream).
//
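// For example, if a flattened struct 'S { float a; float b; }' is passed where the
// formal parameter is also flattened, the single argument 's' expands in place into
// its members, effectively (s.a, s.b).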
void HlslParseContext::expandArguments(const TSourceLoc& loc, const TFunction& function, TIntermTyped*& arguments)
{
    TIntermAggregate* aggregate = arguments->getAsAggregate();
    int functionParamNumberOffset = 0;

    // Replace a single argument with a single argument.
    const auto setArg = [&](int paramNum, TIntermTyped* arg) {
        if (function.getParamCount() + functionParamNumberOffset == 1)
            arguments = arg;
        else {
            if (aggregate == nullptr)
                arguments = arg;
            else
                aggregate->getSequence()[paramNum] = arg;
        }
    };

    // Replace a single argument with a list of arguments
    const auto setArgList = [&](int paramNum, const TVector<TIntermTyped*>& args) {
        if (args.size() == 1)
            setArg(paramNum, args.front());
        else if (args.size() > 1) {
            if (function.getParamCount() + functionParamNumberOffset == 1) {
                arguments = intermediate.makeAggregate(args.front());
                std::for_each(args.begin() + 1, args.end(),
                              [&](TIntermTyped* arg) {
                                  arguments = intermediate.growAggregate(arguments, arg);
                              });
            } else {
                auto it = aggregate->getSequence().erase(aggregate->getSequence().begin() + paramNum);
                aggregate->getSequence().insert(it, args.begin(), args.end());
            }
            functionParamNumberOffset += (int)(args.size() - 1);
        }
    };

    // Process each argument's conversion
    for (int param = 0; param < function.getParamCount(); ++param) {
        // At this early point there is a slight ambiguity between whether an aggregate 'arguments'
        // is the single argument itself or its children are the arguments.  Only one argument
        // means take 'arguments' itself as the one argument.
        TIntermTyped* arg = function.getParamCount() == 1
                                ? arguments->getAsTyped()
                                : (aggregate ?
                                       aggregate->getSequence()[param + functionParamNumberOffset]->getAsTyped() :
                                       arguments->getAsTyped());

        if (wasFlattened(arg) && shouldFlatten(*function[param].type, function[param].type->getQualifier().storage, true)) {
            // Need to pass the structure members instead of the structure.
            TVector<TIntermTyped*> memberArgs;
            for (int memb = 0; memb < (int)arg->getType().getStruct()->size(); ++memb)
                memberArgs.push_back(flattenAccess(arg, memb));
            setArgList(param + functionParamNumberOffset, memberArgs);
        }
    }

    // TODO: if we need both hidden counter args (below) and struct expansion (above)
    // the two algorithms need to be merged: Each assumes the list starts out 1:1 between
    // parameters and arguments.

    // If any argument is a pass-by-reference struct buffer with an associated counter
    // buffer, we have to add another hidden parameter for that counter.
    if (aggregate)
        addStructBuffArguments(loc, aggregate);
}

//
// Add any needed implicit output conversions for function-call arguments.  This
// can require a new tree topology, complicated further by whether the function
// has a return value.
//
// Returns a node of a subtree that evaluates to the return value of the function.
//
TIntermTyped* HlslParseContext::addOutputArgumentConversions(const TFunction& function, TIntermOperator& intermNode)
{
    assert(intermNode.getAsAggregate() != nullptr || intermNode.getAsUnaryNode() != nullptr);

    const TSourceLoc& loc = intermNode.getLoc();

    TIntermSequence argSequence; // temp sequence for unary node args

    if (intermNode.getAsUnaryNode())
        argSequence.push_back(intermNode.getAsUnaryNode()->getOperand());

    TIntermSequence& arguments = argSequence.empty() ? intermNode.getAsAggregate()->getSequence() : argSequence;

    const auto needsConversion = [&](int argNum) {
        return function[argNum].type->getQualifier().isParamOutput() &&
               (*function[argNum].type != arguments[argNum]->getAsTyped()->getType() ||
                shouldConvertLValue(arguments[argNum]) ||
                wasFlattened(arguments[argNum]->getAsTyped()));
    };

    // Will there be any output conversions?
    bool outputConversions = false;
    for (int i = 0; i < function.getParamCount(); ++i) {
        if (needsConversion(i)) {
            outputConversions = true;
            break;
        }
    }

    if (! outputConversions)
        return &intermNode;

    // Setup for the new tree, if needed:
    //
    // Output conversions need a different tree topology.
    // Out-qualified arguments need a temporary of the correct type, with the call
    // followed by an assignment of the temporary to the original argument:
    //     void: function(arg, ...)  ->        (          function(tempArg, ...), arg = tempArg, ...)
    //     ret = function(arg, ...)  ->  ret = (tempRet = function(tempArg, ...), arg = tempArg, ..., tempRet)
    // Where the "tempArg" type needs no conversion as an argument, but will convert on assignment.
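    // For example (illustrative), 'float r = f(myArr[i]);' where f has one out
    // parameter becomes: r = (tempRet = f(tempArg), myArr[i] = tempArg, tempRet).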
    TIntermTyped* conversionTree = nullptr;
    TVariable* tempRet = nullptr;
    if (intermNode.getBasicType() != EbtVoid) {
        // do the "tempRet = function(...), " bit from above
        tempRet = makeInternalVariable("tempReturn", intermNode.getType());
        TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, loc);
        conversionTree = intermediate.addAssign(EOpAssign, tempRetNode, &intermNode, loc);
    } else
        conversionTree = &intermNode;

    conversionTree = intermediate.makeAggregate(conversionTree);

    // Process each argument's conversion
    for (int i = 0; i < function.getParamCount(); ++i) {
        if (needsConversion(i)) {
            // Out-qualified arguments needing conversion need to use the topology setup above.
            // Do the " ...(tempArg, ...), arg = tempArg" bit from above.

            // Make a temporary for what the function expects the argument to look like.
            TVariable* tempArg = makeInternalVariable("tempArg", *function[i].type);
            tempArg->getWritableType().getQualifier().makeTemporary();
            TIntermSymbol* tempArgNode = intermediate.addSymbol(*tempArg, loc);

            // This makes the deepest level, the member-wise copy
            TIntermTyped* tempAssign = handleAssign(arguments[i]->getLoc(), EOpAssign, arguments[i]->getAsTyped(),
                                                    tempArgNode);
            tempAssign = handleLvalue(arguments[i]->getLoc(), "assign", tempAssign);
            conversionTree = intermediate.growAggregate(conversionTree, tempAssign, arguments[i]->getLoc());

            // replace the argument with another node for the same tempArg variable
            arguments[i] = intermediate.addSymbol(*tempArg, loc);
        }
    }

    // Finalize the tree topology (see bigger comment above).
    if (tempRet) {
        // do the "..., tempRet" bit from above
        TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, loc);
        conversionTree = intermediate.growAggregate(conversionTree, tempRetNode, loc);
    }

    conversionTree = intermediate.setAggregateOperator(conversionTree, EOpComma, intermNode.getType(), loc);

    return conversionTree;
}

//
// Add any needed "hidden" counter buffer arguments for function calls.
//
// Modifies the 'aggregate' argument if needed.  Otherwise, this is a no-op.
//
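// For example (illustrative), a call f(myAppendBuf) becomes f(myAppendBuf, counter),
// where 'counter' is the hidden counter-buffer symbol whose name is produced by
// intermediate.addCounterBufferName() for that buffer.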
void HlslParseContext::addStructBuffArguments(const TSourceLoc& loc, TIntermAggregate*& aggregate)
{
    // See if there are any SB types with counters.
    const bool hasStructBuffArg =
        std::any_of(aggregate->getSequence().begin(),
                    aggregate->getSequence().end(),
                    [this](const TIntermNode* node) {
                        return (node && node->getAsTyped() != nullptr) && hasStructBuffCounter(node->getAsTyped()->getType());
                    });

    // Nothing to do, if we didn't find one.
    if (! hasStructBuffArg)
        return;

    TIntermSequence argsWithCounterBuffers;

    for (int param = 0; param < int(aggregate->getSequence().size()); ++param) {
        argsWithCounterBuffers.push_back(aggregate->getSequence()[param]);

        if (hasStructBuffCounter(aggregate->getSequence()[param]->getAsTyped()->getType())) {
            const TIntermSymbol* blockSym = aggregate->getSequence()[param]->getAsSymbolNode();
            if (blockSym != nullptr) {
                TType counterType;
                counterBufferType(loc, counterType);

                const TString counterBlockName(intermediate.addCounterBufferName(blockSym->getName()));

                TVariable* variable = makeInternalVariable(counterBlockName, counterType);

                // Mark this buffer's counter block as being in use
                structBufferCounter[counterBlockName] = true;

                TIntermSymbol* sym = intermediate.addSymbol(*variable, loc);
                argsWithCounterBuffers.push_back(sym);
            }
        }
    }

    // Swap with the temp list we've built up.
    aggregate->getSequence().swap(argsWithCounterBuffers);
}


//
// Do additional checking of built-in function calls that is not caught
// by normal semantic checks on argument type, extension tagging, etc.
//
// Assumes there has been a semantically correct match to a built-in function prototype.
//
void HlslParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermOperator& callNode)
{
    // Set up convenience accessors to the argument(s).  There are almost always
    // multiple arguments for the cases below, but when there might be one,
    // check the unaryArg first.
    const TIntermSequence* argp = nullptr;  // confusing to use [] syntax on a pointer, so this is to help get a reference
    const TIntermTyped* unaryArg = nullptr;
    const TIntermTyped* arg0 = nullptr;
    if (callNode.getAsAggregate()) {
        argp = &callNode.getAsAggregate()->getSequence();
        if (argp->size() > 0)
            arg0 = (*argp)[0]->getAsTyped();
    } else {
        assert(callNode.getAsUnaryNode());
        unaryArg = callNode.getAsUnaryNode()->getOperand();
        arg0 = unaryArg;
    }
    const TIntermSequence& aggArgs = *argp;  // only valid when unaryArg is nullptr

    switch (callNode.getOp()) {
    case EOpTextureGather:
    case EOpTextureGatherOffset:
    case EOpTextureGatherOffsets:
    {
        // Figure out which variants are allowed by what extensions,
        // and what arguments must be constant for which situations.

        TString featureString = fnCandidate.getName() + "(...)";
        const char* feature = featureString.c_str();
        int compArg = -1;  // track which argument, if any, is the constant component argument
        switch (callNode.getOp()) {
        case EOpTextureGather:
            // More than two arguments, or a rectangular or shadow sampler, needs gpu_shader5;
            // otherwise, GL_ARB_texture_gather suffices.
            if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect ||
                fnCandidate[0].type->getSampler().shadow) {
                if (! fnCandidate[0].type->getSampler().shadow)
                    compArg = 2;
            }
            break;
        case EOpTextureGatherOffset:
            // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument
            if (! fnCandidate[0].type->getSampler().shadow)
                compArg = 3;
            break;
        case EOpTextureGatherOffsets:
            if (! fnCandidate[0].type->getSampler().shadow)
                compArg = 3;
            break;
        default:
            break;
        }

        if (compArg > 0 && compArg < fnCandidate.getParamCount()) {
            if (aggArgs[compArg]->getAsConstantUnion()) {
                int value = aggArgs[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (value < 0 || value > 3)
                    error(loc, "must be 0, 1, 2, or 3:", feature, "component argument");
            } else
                error(loc, "must be a compile-time constant:", feature, "component argument");
        }

        break;
    }

    case EOpTextureOffset:
    case EOpTextureFetchOffset:
    case EOpTextureProjOffset:
    case EOpTextureLodOffset:
    case EOpTextureProjLodOffset:
    case EOpTextureGradOffset:
    case EOpTextureProjGradOffset:
    {
        // Handle texture-offset limits checking
        // Pick which argument has to hold constant offsets
        int arg = -1;
        switch (callNode.getOp()) {
        case EOpTextureOffset:          arg = 2;  break;
        case EOpTextureFetchOffset:     arg = (arg0->getType().getSampler().dim != EsdRect) ? 3 : 2; break;
        case EOpTextureProjOffset:      arg = 2;  break;
        case EOpTextureLodOffset:       arg = 3;  break;
        case EOpTextureProjLodOffset:   arg = 3;  break;
        case EOpTextureGradOffset:      arg = 4;  break;
        case EOpTextureProjGradOffset:  arg = 4;  break;
        default:
            assert(0);
            break;
        }

        if (arg > 0) {
            if (aggArgs[arg]->getAsConstantUnion() == nullptr)
                error(loc, "argument must be compile-time constant", "texel offset", "");
            else {
                const TType& type = aggArgs[arg]->getAsTyped()->getType();
                for (int c = 0; c < type.getVectorSize(); ++c) {
                    int offset = aggArgs[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
                    if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
                        error(loc, "value is out of range:", "texel offset",
                              "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
                }
            }
        }

        break;
    }

    case EOpTextureQuerySamples:
    case EOpImageQuerySamples:
        break;

    case EOpImageAtomicAdd:
    case EOpImageAtomicMin:
    case EOpImageAtomicMax:
    case EOpImageAtomicAnd:
    case EOpImageAtomicOr:
    case EOpImageAtomicXor:
    case EOpImageAtomicExchange:
    case EOpImageAtomicCompSwap:
        break;

    case EOpInterpolateAtCentroid:
    case EOpInterpolateAtSample:
    case EOpInterpolateAtOffset:
        // TODO(greg-lunarg): Re-enable this check. It currently gives false errors for builtins
        // defined and passed as members of a struct. In this case the storage class is showing to be
        // Function. See glslang #2584

        // Make sure the first argument is an interpolant, or an array element of an interpolant
        // if (arg0->getType().getQualifier().storage != EvqVaryingIn) {
        //     // It might still be an array element.
        //     //
        //     // We could check more, but the semantics of the first argument are already met; the
        //     // only way to turn an array into a float/vec* is array dereference and swizzle.
        //     //
        //     // ES and desktop 4.3 and earlier:  swizzles may not be used
        //     // desktop 4.4 and later:           swizzles may be used
        //     const TIntermTyped* base = TIntermediate::findLValueBase(arg0, true);
        //     if (base == nullptr || base->getType().getQualifier().storage != EvqVaryingIn)
        //         error(loc, "first argument must be an interpolant, or interpolant-array element",
        //               fnCandidate.getName().c_str(), "");
        // }
        break;

    default:
        break;
    }
}

//
// Handle seeing something in a grammar production that can be done by calling
// a constructor.
//
// The constructor still must be "handled" by handleFunctionCall(), which will
// then call handleConstructor().
//
TFunction* HlslParseContext::makeConstructorCall(const TSourceLoc& loc, const TType& type)
{
    TOperator op = intermediate.mapTypeToConstructorOp(type);

    if (op == EOpNull) {
        error(loc, "cannot construct this type", type.getBasicString(), "");
        return nullptr;
    }

    TString empty("");

    return new TFunction(&empty, type, op);
}

//
// Handle seeing a "COLON semantic" at the end of a type declaration,
// by updating the type according to the semantic.
//
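// For example, a fragment-shader output declared ': SV_TARGET2' receives layout
// location 2 below, and a semantic already mapped to EbvPosition is adjusted to
// EbvFragCoord when the stage is the fragment shader.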
void HlslParseContext::handleSemantic(TSourceLoc loc, TQualifier& qualifier, TBuiltInVariable builtIn,
                                      const TString& upperCase)
{
    // Parse and return semantic number.  If limit is 0, it will be ignored.  Otherwise, if the parsed
    // semantic number is >= limit, errorMsg is issued and 0 is returned.
    // TODO: it would be nicer if limit and errorMsg had default parameters, but some compilers don't yet
    // accept those in lambda functions.
    const auto getSemanticNumber = [this, loc](const TString& semantic, unsigned int limit, const char* errorMsg) -> unsigned int {
        size_t pos = semantic.find_last_not_of("0123456789");
        if (pos == std::string::npos)
            return 0u;

        unsigned int semanticNum = (unsigned int)atoi(semantic.c_str() + pos + 1);

        if (limit != 0 && semanticNum >= limit) {
            error(loc, errorMsg, semantic.c_str(), "");
            return 0u;
        }

        return semanticNum;
    };

    if (builtIn == EbvNone && hlslDX9Compatible()) {
        if (language == EShLangVertex) {
            if (qualifier.isParamOutput()) {
                if (upperCase == "POSITION") {
                    builtIn = EbvPosition;
                }
                if (upperCase == "PSIZE") {
                    builtIn = EbvPointSize;
                }
            }
        } else if (language == EShLangFragment) {
            if (qualifier.isParamInput() && upperCase == "VPOS") {
                builtIn = EbvFragCoord;
            }
            if (qualifier.isParamOutput()) {
                if (upperCase.compare(0, 5, "COLOR") == 0) {
                    qualifier.layoutLocation = getSemanticNumber(upperCase, 0, nullptr);
                    nextOutLocation = std::max(nextOutLocation, qualifier.layoutLocation + 1u);
                }
                if (upperCase == "DEPTH") {
                    builtIn = EbvFragDepth;
                }
            }
        }
    }

    switch(builtIn) {
    case EbvNone:
        // Get location numbers from fragment outputs, instead of
        // auto-assigning them.
        if (language == EShLangFragment && upperCase.compare(0, 9, "SV_TARGET") == 0) {
            qualifier.layoutLocation = getSemanticNumber(upperCase, 0, nullptr);
            nextOutLocation = std::max(nextOutLocation, qualifier.layoutLocation + 1u);
        } else if (upperCase.compare(0, 15, "SV_CLIPDISTANCE") == 0) {
            builtIn = EbvClipDistance;
            qualifier.layoutLocation = getSemanticNumber(upperCase, maxClipCullRegs, "invalid clip semantic");
        } else if (upperCase.compare(0, 15, "SV_CULLDISTANCE") == 0) {
            builtIn = EbvCullDistance;
            qualifier.layoutLocation = getSemanticNumber(upperCase, maxClipCullRegs, "invalid cull semantic");
        }
        break;
    case EbvPosition:
        // adjust for stage in/out
        if (language == EShLangFragment)
            builtIn = EbvFragCoord;
        break;
    case EbvFragStencilRef:
        error(loc, "unimplemented; need ARB_shader_stencil_export", "SV_STENCILREF", "");
        break;
    case EbvTessLevelInner:
    case EbvTessLevelOuter:
        qualifier.patch = true;
        break;
    default:
        break;
    }

    if (qualifier.builtIn == EbvNone)
        qualifier.builtIn = builtIn;
    qualifier.semanticName = intermediate.addSemanticName(upperCase);
}

//
// Handle seeing something like "PACKOFFSET LEFT_PAREN c[Subcomponent][.component] RIGHT_PAREN"
//
// 'location' has the "c[Subcomponent]" part.
// 'component' points to the "component" part, or nullptr if not present.
//
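// For example, 'packoffset(c2.y)' yields a byte offset of 16*2 + 4 = 36:
// each 'c' register is 16 bytes, and each component is 4 bytes.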
void HlslParseContext::handlePackOffset(const TSourceLoc& loc, TQualifier& qualifier, const glslang::TString& location,
                                        const glslang::TString* component)
{
    if (location.size() == 0 || location[0] != 'c') {
        error(loc, "expected 'c'", "packoffset", "");
        return;
    }
    if (location.size() == 1)
        return;
    if (! isdigit(location[1])) {
        error(loc, "expected number after 'c'", "packoffset", "");
        return;
    }

    qualifier.layoutOffset = 16 * atoi(location.substr(1, location.size()).c_str());
    if (component != nullptr) {
        int componentOffset = 0;
        switch ((*component)[0]) {
        case 'x': componentOffset =  0; break;
        case 'y': componentOffset =  4; break;
        case 'z': componentOffset =  8; break;
        case 'w': componentOffset = 12; break;
        default:
            componentOffset = -1;
            break;
        }
        if (componentOffset < 0 || component->size() > 1) {
            error(loc, "expected {x, y, z, w} for component", "packoffset", "");
            return;
        }
        qualifier.layoutOffset += componentOffset;
    }
}

//
// Handle seeing something like "REGISTER LEFT_PAREN [shader_profile,] Type# RIGHT_PAREN"
//
// 'profile' points to the shader_profile part, or nullptr if not present.
// 'desc' is the type# part.
//
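// For example, 'register(t3, space1)' yields layoutBinding 3 (plus any subComponent)
// and layoutSet 1, while 'register(c2)' instead sets a byte offset of 2 * 16 = 32
// into the global constant buffer.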
void HlslParseContext::handleRegister(const TSourceLoc& loc, TQualifier& qualifier, const glslang::TString* profile,
                                      const glslang::TString& desc, int subComponent, const glslang::TString* spaceDesc)
{
    if (profile != nullptr)
        warn(loc, "ignoring shader_profile", "register", "");

    if (desc.size() < 1) {
        error(loc, "expected register type", "register", "");
        return;
    }

    int regNumber = 0;
    if (desc.size() > 1) {
        if (isdigit(desc[1]))
            regNumber = atoi(desc.substr(1, desc.size()).c_str());
        else {
            error(loc, "expected register number after register type", "register", "");
            return;
        }
    }

    // For more information about register types, see
    // https://docs.microsoft.com/en-us/windows/desktop/direct3dhlsl/dx-graphics-hlsl-variable-register
    const std::vector<std::string>& resourceInfo = intermediate.getResourceSetBinding();
    switch (std::tolower(desc[0])) {
    case 'c':
        // c register is the register slot in the global const buffer;
        // each slot is a vector of 4 32-bit components
        qualifier.layoutOffset = regNumber * 4 * 4;
        break;
        // const buffer register slot
    case 'b':
        // textures and structured buffers
    case 't':
        // samplers
    case 's':
        // UAV resources
    case 'u':
        // if nothing else has set the binding, do so now
        // (other mechanisms override this one)
        if (!qualifier.hasBinding())
            qualifier.layoutBinding = regNumber + subComponent;

        // This handles per-register layout set numbers.  For the global mode which sets
        // every symbol to the same value, see setLinkageLayoutSets().
        if ((resourceInfo.size() % 3) == 0) {
            // Apply per-symbol resource set and binding.
            for (auto it = resourceInfo.cbegin(); it != resourceInfo.cend(); it = it + 3) {
                if (strcmp(desc.c_str(), it[0].c_str()) == 0) {
                    qualifier.layoutSet = atoi(it[1].c_str());
                    qualifier.layoutBinding = atoi(it[2].c_str()) + subComponent;
                    break;
                }
            }
        }
        break;
    default:
        warn(loc, "ignoring unrecognized register type", "register", "%c", desc[0]);
        break;
    }

    // space
    unsigned int setNumber;
    const auto crackSpace = [&]() -> bool {
        const int spaceLen = 5;
        if (spaceDesc->size() < spaceLen + 1)
            return false;
        if (spaceDesc->compare(0, spaceLen, "space") != 0)
            return false;
        if (! isdigit((*spaceDesc)[spaceLen]))
            return false;
        setNumber = atoi(spaceDesc->substr(spaceLen, spaceDesc->size()).c_str());
        return true;
    };

    // if nothing else has set the set, do so now
    // (other mechanisms override this one)
    if (spaceDesc && !qualifier.hasSet()) {
        if (! crackSpace()) {
            error(loc, "expected spaceN", "register", "");
            return;
        }
        qualifier.layoutSet = setNumber;
    }
}

// Convert to a scalar boolean, or if not allowed by HLSL semantics,
// report an error and return nullptr.
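// For example, 'if (f)' with a float 'f' becomes 'if (bool(f))' through the
// EOpConstructBool conversion below.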
TIntermTyped* HlslParseContext::convertConditionalExpression(const TSourceLoc& loc, TIntermTyped* condition,
                                                             bool mustBeScalar)
{
    if (mustBeScalar && !condition->getType().isScalarOrVec1()) {
        error(loc, "requires a scalar", "conditional expression", "");
        return nullptr;
    }

    return intermediate.addConversion(EOpConstructBool, TType(EbtBool, EvqTemporary, condition->getVectorSize()),
                                      condition);
}

//
// Same error message for all places assignments don't work.
//
void HlslParseContext::assignError(const TSourceLoc& loc, const char* op, TString left, TString right)
{
    error(loc, "", op, "cannot convert from '%s' to '%s'",
          right.c_str(), left.c_str());
}

//
// Same error message for all places unary operations don't work.
//
void HlslParseContext::unaryOpError(const TSourceLoc& loc, const char* op, TString operand)
{
    error(loc, " wrong operand type", op,
          "no operation '%s' exists that takes an operand of type %s (or there is no acceptable conversion)",
          op, operand.c_str());
}

//
// Same error message for all places binary operations don't work.
//
void HlslParseContext::binaryOpError(const TSourceLoc& loc, const char* op, TString left, TString right)
{
    error(loc, " wrong operand types:", op,
          "no operation '%s' exists that takes a left-hand operand of type '%s' and "
          "a right operand of type '%s' (or there is no acceptable conversion)",
          op, left.c_str(), right.c_str());
}

//
// A basic type of EbtVoid is a key that the name string was seen in the source, but
// it was not found as a variable in the symbol table.  If so, give the error
// message and insert a dummy variable in the symbol table to prevent future errors.
//
void HlslParseContext::variableCheck(TIntermTyped*& nodePtr)
{
    TIntermSymbol* symbol = nodePtr->getAsSymbolNode();
    if (! symbol)
        return;

    if (symbol->getType().getBasicType() == EbtVoid) {
        error(symbol->getLoc(), "undeclared identifier", symbol->getName().c_str(), "");

        // Add to symbol table to prevent future error messages on the same name
        if (symbol->getName().size() > 0) {
            TVariable* fakeVariable = new TVariable(&symbol->getName(), TType(EbtFloat));
            symbolTable.insert(*fakeVariable);

            // substitute a symbol node for this new variable
            nodePtr = intermediate.addSymbol(*fakeVariable, symbol->getLoc());
        }
    }
}

//
// Both test, and if necessary spit out an error, to see if the node is really
// a constant.
//
void HlslParseContext::constantValueCheck(TIntermTyped* node, const char* token)
{
    if (node->getQualifier().storage != EvqConst)
        error(node->getLoc(), "constant expression required", token, "");
}

//
// Both test, and if necessary spit out an error, to see if the node is really
// an integer.
//
void HlslParseContext::integerCheck(const TIntermTyped* node, const char* token)
{
    if ((node->getBasicType() == EbtInt || node->getBasicType() == EbtUint) && node->isScalar())
        return;

    error(node->getLoc(), "scalar integer expression required", token, "");
}

//
// Both test, and if necessary spit out an error, to see if we are currently
// globally scoped.
//
void HlslParseContext::globalCheck(const TSourceLoc& loc, const char* token)
{
    if (! symbolTable.atGlobalLevel())
        error(loc, "not allowed in nested scope", token, "");
}

bool HlslParseContext::builtInName(const TString& /*identifier*/)
{
    return false;
}

//
// Make sure there is enough data and not too many arguments provided to the
// constructor to build something of the type of the constructor.  Also returns
// the type of the constructor.
//
// Returns true if there was an error in construction.
//
bool HlslParseContext::constructorError(const TSourceLoc& loc, TIntermNode* node, TFunction& function,
                                        TOperator op, TType& type)
{
    type.shallowCopy(function.getType());

    bool constructingMatrix = false;
    switch (op) {
    case EOpConstructTextureSampler:
        error(loc, "unhandled texture constructor", "constructor", "");
        return true;
    case EOpConstructMat2x2:
    case EOpConstructMat2x3:
    case EOpConstructMat2x4:
    case EOpConstructMat3x2:
    case EOpConstructMat3x3:
    case EOpConstructMat3x4:
    case EOpConstructMat4x2:
    case EOpConstructMat4x3:
    case EOpConstructMat4x4:
    case EOpConstructDMat2x2:
    case EOpConstructDMat2x3:
    case EOpConstructDMat2x4:
    case EOpConstructDMat3x2:
    case EOpConstructDMat3x3:
    case EOpConstructDMat3x4:
    case EOpConstructDMat4x2:
    case EOpConstructDMat4x3:
    case EOpConstructDMat4x4:
    case EOpConstructIMat2x2:
    case EOpConstructIMat2x3:
    case EOpConstructIMat2x4:
    case EOpConstructIMat3x2:
    case EOpConstructIMat3x3:
    case EOpConstructIMat3x4:
    case EOpConstructIMat4x2:
    case EOpConstructIMat4x3:
    case EOpConstructIMat4x4:
    case EOpConstructUMat2x2:
    case EOpConstructUMat2x3:
    case EOpConstructUMat2x4:
    case EOpConstructUMat3x2:
    case EOpConstructUMat3x3:
    case EOpConstructUMat3x4:
    case EOpConstructUMat4x2:
    case EOpConstructUMat4x3:
    case EOpConstructUMat4x4:
    case EOpConstructBMat2x2:
    case EOpConstructBMat2x3:
    case EOpConstructBMat2x4:
    case EOpConstructBMat3x2:
    case EOpConstructBMat3x3:
    case EOpConstructBMat3x4:
    case EOpConstructBMat4x2:
    case EOpConstructBMat4x3:
    case EOpConstructBMat4x4:
        constructingMatrix = true;
        break;
    default:
        break;
    }

    //
    // Walk the arguments for first-pass checks and collection of information.
    //

    int size = 0;
    bool constType = true;
    bool full = false;
    bool overFull = false;
    bool matrixInMatrix = false;
    bool arrayArg = false;
    for (int arg = 0; arg < function.getParamCount(); ++arg) {
        if (function[arg].type->isArray()) {
            if (function[arg].type->isUnsizedArray()) {
                // Can't construct from an unsized array.
                error(loc, "array argument must be sized", "constructor", "");
                return true;
            }
            arrayArg = true;
        }
        if (constructingMatrix && function[arg].type->isMatrix())
            matrixInMatrix = true;

        // 'full' will go to true when enough args have been seen.  If we loop
        // again, there is an extra argument.
        if (full) {
            // For vectors and matrices, it's okay to have too many components
            // available, but not okay to have unused arguments.
            overFull = true;
        }

        size += function[arg].type->computeNumComponents();
        if (op != EOpConstructStruct && ! type.isArray() && size >= type.computeNumComponents())
            full = true;

        if (function[arg].type->getQualifier().storage != EvqConst)
            constType = false;
    }

    if (constType)
        type.getQualifier().storage = EvqConst;

    if (type.isArray()) {
        if (function.getParamCount() == 0) {
            error(loc, "array constructor must have at least one argument", "constructor", "");
            return true;
        }

        if (type.isUnsizedArray()) {
            // auto adapt the constructor type to the number of arguments
            type.changeOuterArraySize(function.getParamCount());
        } else if (type.getOuterArraySize() != function.getParamCount() && type.computeNumComponents() > size) {
            error(loc, "array constructor needs one argument per array element", "constructor", "");
            return true;
        }

        if (type.isArrayOfArrays()) {
            // Types have to match, but we're still making the type.
            // Finish making the type, and the comparison is done later
            // when checking for conversion.
            TArraySizes& arraySizes = *type.getArraySizes();

            // At least the dimensionalities have to match.
            if (! function[0].type->isArray() ||
                arraySizes.getNumDims() != function[0].type->getArraySizes()->getNumDims() + 1) {
                error(loc, "array constructor argument not correct type to construct array element", "constructor", "");
                return true;
            }

            if (arraySizes.isInnerUnsized()) {
                // "Arrays of arrays ..., and the size for any dimension is optional"
                // That means we need to adopt (from the first argument) the other array sizes into the type.
                for (int d = 1; d < arraySizes.getNumDims(); ++d) {
                    if (arraySizes.getDimSize(d) == UnsizedArraySize) {
                        arraySizes.setDimSize(d, function[0].type->getArraySizes()->getDimSize(d - 1));
                    }
                }
            }
        }
    }

    // Some array -> array type casts are okay
    if (arrayArg && function.getParamCount() == 1 && op != EOpConstructStruct && type.isArray() &&
        !type.isArrayOfArrays() && !function[0].type->isArrayOfArrays() &&
        type.getVectorSize() >= 1 && function[0].type->getVectorSize() >= 1)
        return false;

    if (arrayArg && op != EOpConstructStruct && ! type.isArrayOfArrays()) {
        error(loc, "constructing non-array constituent from array argument", "constructor", "");
        return true;
    }

    if (matrixInMatrix && ! type.isArray()) {
        return false;
    }

    if (overFull) {
        error(loc, "too many arguments", "constructor", "");
        return true;
    }

    if (op == EOpConstructStruct && ! type.isArray()) {
        if (isScalarConstructor(node))
            return false;

        // Self-type construction: e.g., we can construct a struct from a single identically typed object.
        if (function.getParamCount() == 1 && type == *function[0].type)
            return false;

        if ((int)type.getStruct()->size() != function.getParamCount()) {
            error(loc, "Number of constructor parameters does not match the number of structure fields", "constructor", "");
            return true;
        }
    }

    if ((op != EOpConstructStruct && size != 1 && size < type.computeNumComponents()) ||
        (op == EOpConstructStruct && size < type.computeNumComponents())) {
        error(loc, "not enough data provided for construction", "constructor", "");
        return true;
    }

    return false;
}

// See if 'node', in the context of constructing aggregates, is a scalar argument
// to a constructor.
//
bool HlslParseContext::isScalarConstructor(const TIntermNode* node)
{
    // Obviously, it must be a scalar, but an aggregate node might not be fully
    // completed yet: holding a sequence of initializers under an aggregate
    // would not yet be typed, so don't check its type.  This corresponds to
    // the aggregate operator also not being set yet.  (An aggregate operation
    // that legitimately yields a scalar will have a getOp() of that operator,
    // not EOpNull.)

    return node->getAsTyped() != nullptr &&
           node->getAsTyped()->isScalar() &&
           (node->getAsAggregate() == nullptr || node->getAsAggregate()->getOp() != EOpNull);
}

// Checks to see if a void variable has been declared and raises an error message for such a case
//
// returns true in case of an error
//
bool HlslParseContext::voidErrorCheck(const TSourceLoc& loc, const TString& identifier, const TBasicType basicType)
{
    if (basicType == EbtVoid) {
        error(loc, "illegal use of type 'void'", identifier.c_str(), "");
        return true;
    }

    return false;
}

//
// Fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level.
//
void HlslParseContext::globalQualifierFix(const TSourceLoc&, TQualifier& qualifier)
{
    // move from parameter/unknown qualifiers to pipeline in/out qualifiers
    switch (qualifier.storage) {
    case EvqIn:
        qualifier.storage = EvqVaryingIn;
        break;
    case EvqOut:
        qualifier.storage = EvqVaryingOut;
        break;
    default:
        break;
    }
}

//
// Merge characteristics of the 'src' qualifier into the 'dst'.
//
void HlslParseContext::mergeQualifiers(TQualifier& dst, const TQualifier& src)
{
    // Storage qualification
    if (dst.storage == EvqTemporary || dst.storage == EvqGlobal)
        dst.storage = src.storage;
    else if ((dst.storage == EvqIn  && src.storage == EvqOut) ||
             (dst.storage == EvqOut && src.storage == EvqIn))
        dst.storage = EvqInOut;
    else if ((dst.storage == EvqIn    && src.storage == EvqConst) ||
             (dst.storage == EvqConst && src.storage == EvqIn))
        dst.storage = EvqConstReadOnly;

    // Layout qualifiers
    mergeObjectLayoutQualifiers(dst, src, false);

    // individual qualifiers
#define MERGE_SINGLETON(field) dst.field |= src.field;
    MERGE_SINGLETON(invariant);
    MERGE_SINGLETON(noContraction);
    MERGE_SINGLETON(centroid);
    MERGE_SINGLETON(smooth);
    MERGE_SINGLETON(flat);
    MERGE_SINGLETON(nopersp);
    MERGE_SINGLETON(patch);
    MERGE_SINGLETON(sample);
    MERGE_SINGLETON(coherent);
    MERGE_SINGLETON(volatil);
    MERGE_SINGLETON(restrict);
    MERGE_SINGLETON(readonly);
    MERGE_SINGLETON(writeonly);
    MERGE_SINGLETON(specConstant);
    MERGE_SINGLETON(nonUniform);
}

// used to flatten the sampler type space into a single dimension
// correlates with the declaration of defaultSamplerPrecision[]
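// The result is a mixed-radix index: 'dim' varies fastest, then the basic type,
// then the packed (arrayed, shadow, external) flags.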
int HlslParseContext::computeSamplerTypeIndex(TSampler& sampler)
{
    int arrayIndex    = sampler.arrayed  ? 1 : 0;
    int shadowIndex   = sampler.shadow   ? 1 : 0;
    int externalIndex = sampler.external ? 1 : 0;

    return EsdNumDims *
           (EbtNumTypes * (2 * (2 * arrayIndex + shadowIndex) + externalIndex) + sampler.type) + sampler.dim;
}

//
// Do size checking for an array type's size.
//
void HlslParseContext::arraySizeCheck(const TSourceLoc& loc, TIntermTyped* expr, TArraySize& sizePair)
{
    bool isConst = false;
    sizePair.size = 1;
    sizePair.node = nullptr;

    TIntermConstantUnion* constant = expr->getAsConstantUnion();
    if (constant) {
        // handle true (non-specialization) constant
        sizePair.size = constant->getConstArray()[0].getIConst();
        isConst = true;
    } else {
        // see if it's a specialization constant instead
        if (expr->getQualifier().isSpecConstant()) {
            isConst = true;
            sizePair.node = expr;
            TIntermSymbol* symbol = expr->getAsSymbolNode();
            if (symbol && symbol->getConstArray().size() > 0)
                sizePair.size = symbol->getConstArray()[0].getIConst();
        }
    }

    if (! isConst || (expr->getBasicType() != EbtInt && expr->getBasicType() != EbtUint)) {
        error(loc, "array size must be a constant integer expression", "", "");
        return;
    }

    if (sizePair.size <= 0) {
        error(loc, "array size must be a positive integer", "", "");
        return;
    }
}

//
// Require array to be completely sized
//
void HlslParseContext::arraySizeRequiredCheck(const TSourceLoc& loc, const TArraySizes& arraySizes)
{
    if (arraySizes.hasUnsized())
        error(loc, "array size required", "", "");
}

void HlslParseContext::structArrayCheck(const TSourceLoc& /*loc*/, const TType& type)
{
    const TTypeList& structure = *type.getStruct();
    for (int m = 0; m < (int)structure.size(); ++m) {
        const TType& member = *structure[m].type;
        if (member.isArray())
            arraySizeRequiredCheck(structure[m].loc, *member.getArraySizes());
    }
}

//
// Do all the semantic checking for declaring or redeclaring an array, with and
// without a size, and make the right changes to the symbol table.
//
void HlslParseContext::declareArray(const TSourceLoc& loc, const TString& identifier, const TType& type,
                                    TSymbol*& symbol, bool track)
{
    if (symbol == nullptr) {
        bool currentScope;
        symbol = symbolTable.find(identifier, nullptr, &currentScope);

        if (symbol && builtInName(identifier) && ! symbolTable.atBuiltInLevel()) {
            // bad shader (errors already reported) trying to redeclare a built-in name as an array
            return;
        }
        if (symbol == nullptr || ! currentScope) {
            //
            // Successfully process a new definition.
            // (Redeclarations have to take place at the same scope; otherwise they are hiding declarations)
            //
            symbol = new TVariable(&identifier, type);
            symbolTable.insert(*symbol);
            if (track && symbolTable.atGlobalLevel())
                trackLinkage(*symbol);

            return;
        }
        if (symbol->getAsAnonMember()) {
            error(loc, "cannot redeclare a user-block member array", identifier.c_str(), "");
            symbol = nullptr;
            return;
        }
    }

    //
    // Process a redeclaration.
    //

    if (symbol == nullptr) {
        error(loc, "array variable name expected", identifier.c_str(), "");
        return;
    }

    // redeclareBuiltinVariable() should have already done the copyUp()
    TType& existingType = symbol->getWritableType();

    if (existingType.isSizedArray()) {
        // be more lenient for input arrays to geometry shaders and tessellation control outputs,
        // where the redeclaration is the same size
        return;
    }

    existingType.updateArraySizes(type);
}

//
// Enforce non-initializer type/qualifier rules.
//
void HlslParseContext::fixConstInit(const TSourceLoc& loc, const TString& identifier, TType& type,
                                    TIntermTyped*& initializer)
{
    //
    // Make the qualifier make sense, given that there is an initializer.
    //
    if (initializer == nullptr) {
        if (type.getQualifier().storage == EvqConst ||
            type.getQualifier().storage == EvqConstReadOnly) {
            initializer = intermediate.makeAggregate(loc);
            warn(loc, "variable with qualifier 'const' not initialized; zero initializing", identifier.c_str(), "");
        }
    }
}

//
// See if the identifier is a built-in symbol that can be redeclared, and if so,
// copy the symbol table's read-only built-in variable to the current
// global level, where it can be modified based on the passed in type.
//
// Returns nullptr if no redeclaration took place, meaning a normal declaration still
// needs to occur for it, not necessarily an error.
//
// Returns a redeclared and type-modified variable if a redeclaration occurred.
//
redeclareBuiltinVariable(const TSourceLoc &,const TString & identifier,const TQualifier &,const TShaderQualifiers &)6970 TSymbol* HlslParseContext::redeclareBuiltinVariable(const TSourceLoc& /*loc*/, const TString& identifier,
6971 const TQualifier& /*qualifier*/,
6972 const TShaderQualifiers& /*publicType*/)
6973 {
6974 if (! builtInName(identifier) || symbolTable.atBuiltInLevel() || ! symbolTable.atGlobalLevel())
6975 return nullptr;
6976
6977 return nullptr;
6978 }
6979
6980 //
6981 // Generate index to the array element in a structure buffer (SSBO)
6982 //
6983 TIntermTyped* HlslParseContext::indexStructBufferContent(const TSourceLoc& loc, TIntermTyped* buffer) const
6984 {
6985 // Bail out if not a struct buffer
6986 if (buffer == nullptr || ! isStructBufferType(buffer->getType()))
6987 return nullptr;
6988
6989 // Runtime sized array is always the last element.
6990 const TTypeList* bufferStruct = buffer->getType().getStruct();
6991 TIntermTyped* arrayPosition = intermediate.addConstantUnion(unsigned(bufferStruct->size()-1), loc);
6992
6993 TIntermTyped* argArray = intermediate.addIndex(EOpIndexDirectStruct, buffer, arrayPosition, loc);
6994 argArray->setType(*(*bufferStruct)[bufferStruct->size()-1].type);
6995
6996 return argArray;
6997 }
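
// For example, a declaration like "RWStructuredBuffer<MyType> buf;"
// (hypothetical HLSL) lowers to a block whose final member is a runtime-sized
// array; the EOpIndexDirectStruct expression built above selects exactly that
// trailing member, so callers can then index individual elements of it.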
6998
6999 //
7000 // IFF type is a structuredbuffer/byteaddressbuffer type, return the content
7001 // (template) type. E.g., StructuredBuffer<MyType> -> MyType. Else return nullptr.
7002 //
7003 TType* HlslParseContext::getStructBufferContentType(const TType& type) const
7004 {
7005 if (type.getBasicType() != EbtBlock || type.getQualifier().storage != EvqBuffer)
7006 return nullptr;
7007
7008 const int memberCount = (int)type.getStruct()->size();
7009 assert(memberCount > 0);
7010
7011 TType* contentType = (*type.getStruct())[memberCount-1].type;
7012
7013 return contentType->isUnsizedArray() ? contentType : nullptr;
7014 }
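
// E.g., for the block generated from "StructuredBuffer<MyType>" (hypothetical
// HLSL), the final member is an unsized array whose element type is MyType,
// and that member's type is returned; a block whose last member is sized,
// such as one from a cbuffer, yields nullptr instead.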
7015
7016 //
7017 // If an existing struct buffer has a sharable type, then share it.
7018 //
7019 void HlslParseContext::shareStructBufferType(TType& type)
7020 {
7021 // For types to be shared, their PackOffsets must be equivalent on a per-member basis.
7022 // Note: cannot use auto type due to recursion. Thus, this is a std::function.
7023 const std::function<bool(TType& lhs, TType& rhs)>
7024 compareQualifiers = [&](TType& lhs, TType& rhs) -> bool {
7025 if (lhs.getQualifier().layoutOffset != rhs.getQualifier().layoutOffset)
7026 return false;
7027
7028 if (lhs.isStruct() != rhs.isStruct())
7029 return false;
7030
7031 if (lhs.getQualifier().builtIn != rhs.getQualifier().builtIn)
7032 return false;
7033
7034 if (lhs.isStruct() && rhs.isStruct()) {
7035 if (lhs.getStruct()->size() != rhs.getStruct()->size())
7036 return false;
7037
7038 for (int i = 0; i < int(lhs.getStruct()->size()); ++i)
7039 if (!compareQualifiers(*(*lhs.getStruct())[i].type, *(*rhs.getStruct())[i].type))
7040 return false;
7041 }
7042
7043 return true;
7044 };
7045
7046 // We need to compare certain qualifiers in addition to the type.
7047 const auto typeEqual = [compareQualifiers](TType& lhs, TType& rhs) -> bool {
7048 if (lhs.getQualifier().readonly != rhs.getQualifier().readonly)
7049 return false;
7050
7051 // If both are structures, recursively look for packOffset equality
7052 // as well as type equality.
7053 return compareQualifiers(lhs, rhs) && lhs == rhs;
7054 };
7055
7056 // This is an exhaustive O(N) search, but real world shaders have
7057 // only a small number of these.
7058 for (int idx = 0; idx < int(structBufferTypes.size()); ++idx) {
7059 // If the deep structure matches, modulo qualifiers, use it
7060 if (typeEqual(*structBufferTypes[idx], type)) {
7061 type.shallowCopy(*structBufferTypes[idx]);
7062 return;
7063 }
7064 }
7065
7066 // Otherwise, remember it:
7067 TType* typeCopy = new TType;
7068 typeCopy->shallowCopy(type);
7069 structBufferTypes.push_back(typeCopy);
7070 }
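
// Explanatory gloss on the apparent intent above: two separate declarations
// of, say, "StructuredBuffer<MyType>" produce structurally identical block
// types; funneling them through this cache makes them share one TType, so
// downstream code can treat them as the same type without deep-comparing
// members each time.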
7071
7072 void HlslParseContext::paramFix(TType& type)
7073 {
7074 switch (type.getQualifier().storage) {
7075 case EvqConst:
7076 type.getQualifier().storage = EvqConstReadOnly;
7077 break;
7078 case EvqGlobal:
7079 case EvqTemporary:
7080 type.getQualifier().storage = EvqIn;
7081 break;
7082 case EvqBuffer:
7083 {
7084 // SSBO parameter. These do not go through the declareBlock path since they are fn parameters.
7085 correctUniform(type.getQualifier());
7086 TQualifier bufferQualifier = globalBufferDefaults;
7087 mergeObjectLayoutQualifiers(bufferQualifier, type.getQualifier(), true);
7088 bufferQualifier.storage = type.getQualifier().storage;
7089 bufferQualifier.readonly = type.getQualifier().readonly;
7090 bufferQualifier.coherent = type.getQualifier().coherent;
7091 bufferQualifier.declaredBuiltIn = type.getQualifier().declaredBuiltIn;
7092 type.getQualifier() = bufferQualifier;
7093 break;
7094 }
7095 default:
7096 break;
7097 }
7098 }
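
// Example (hypothetical HLSL): for a function such as
//     void accumulate(RWStructuredBuffer<int> buf) { ... }
// the parameter arrives with EvqBuffer storage, so the case above merges the
// global buffer layout defaults into its qualifier while preserving its own
// storage, readonly, coherent, and declaredBuiltIn settings.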
7099
7100 void HlslParseContext::specializationCheck(const TSourceLoc& loc, const TType& type, const char* op)
7101 {
7102 if (type.containsSpecializationSize())
7103 error(loc, "can't use with types containing arrays sized with a specialization constant", op, "");
7104 }
7105
7106 //
7107 // Layout qualifier stuff.
7108 //
7109
7110 // Put the id's layout qualification into the qualifier, for qualifiers not having a number set.
7111 // This is before we know any type information for error checking.
7112 void HlslParseContext::setLayoutQualifier(const TSourceLoc& loc, TQualifier& qualifier, TString& id)
7113 {
7114 std::transform(id.begin(), id.end(), id.begin(), ::tolower);
7115
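// Note on the apparent swap below: HLSL's row_major/column_major terms are
// transposed relative to the GLSL/SPIR-V terminology used by these enums, so
// mapping "column_major" to ElmRowMajor (and vice versa) is intentional.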
7116 if (id == TQualifier::getLayoutMatrixString(ElmColumnMajor)) {
7117 qualifier.layoutMatrix = ElmRowMajor;
7118 return;
7119 }
7120 if (id == TQualifier::getLayoutMatrixString(ElmRowMajor)) {
7121 qualifier.layoutMatrix = ElmColumnMajor;
7122 return;
7123 }
7124 if (id == "push_constant") {
7125 requireVulkan(loc, "push_constant");
7126 qualifier.layoutPushConstant = true;
7127 return;
7128 }
7129 if (language == EShLangGeometry || language == EShLangTessEvaluation) {
7130 if (id == TQualifier::getGeometryString(ElgTriangles)) {
7131 // publicType.shaderQualifiers.geometry = ElgTriangles;
7132 warn(loc, "ignored", id.c_str(), "");
7133 return;
7134 }
7135 if (language == EShLangGeometry) {
7136 if (id == TQualifier::getGeometryString(ElgPoints)) {
7137 // publicType.shaderQualifiers.geometry = ElgPoints;
7138 warn(loc, "ignored", id.c_str(), "");
7139 return;
7140 }
7141 if (id == TQualifier::getGeometryString(ElgLineStrip)) {
7142 // publicType.shaderQualifiers.geometry = ElgLineStrip;
7143 warn(loc, "ignored", id.c_str(), "");
7144 return;
7145 }
7146 if (id == TQualifier::getGeometryString(ElgLines)) {
7147 // publicType.shaderQualifiers.geometry = ElgLines;
7148 warn(loc, "ignored", id.c_str(), "");
7149 return;
7150 }
7151 if (id == TQualifier::getGeometryString(ElgLinesAdjacency)) {
7152 // publicType.shaderQualifiers.geometry = ElgLinesAdjacency;
7153 warn(loc, "ignored", id.c_str(), "");
7154 return;
7155 }
7156 if (id == TQualifier::getGeometryString(ElgTrianglesAdjacency)) {
7157 // publicType.shaderQualifiers.geometry = ElgTrianglesAdjacency;
7158 warn(loc, "ignored", id.c_str(), "");
7159 return;
7160 }
7161 if (id == TQualifier::getGeometryString(ElgTriangleStrip)) {
7162 // publicType.shaderQualifiers.geometry = ElgTriangleStrip;
7163 warn(loc, "ignored", id.c_str(), "");
7164 return;
7165 }
7166 } else {
7167 assert(language == EShLangTessEvaluation);
7168
7169 // input primitive
7170 if (id == TQualifier::getGeometryString(ElgTriangles)) {
7171 // publicType.shaderQualifiers.geometry = ElgTriangles;
7172 warn(loc, "ignored", id.c_str(), "");
7173 return;
7174 }
7175 if (id == TQualifier::getGeometryString(ElgQuads)) {
7176 // publicType.shaderQualifiers.geometry = ElgQuads;
7177 warn(loc, "ignored", id.c_str(), "");
7178 return;
7179 }
7180 if (id == TQualifier::getGeometryString(ElgIsolines)) {
7181 // publicType.shaderQualifiers.geometry = ElgIsolines;
7182 warn(loc, "ignored", id.c_str(), "");
7183 return;
7184 }
7185
7186 // vertex spacing
7187 if (id == TQualifier::getVertexSpacingString(EvsEqual)) {
7188 // publicType.shaderQualifiers.spacing = EvsEqual;
7189 warn(loc, "ignored", id.c_str(), "");
7190 return;
7191 }
7192 if (id == TQualifier::getVertexSpacingString(EvsFractionalEven)) {
7193 // publicType.shaderQualifiers.spacing = EvsFractionalEven;
7194 warn(loc, "ignored", id.c_str(), "");
7195 return;
7196 }
7197 if (id == TQualifier::getVertexSpacingString(EvsFractionalOdd)) {
7198 // publicType.shaderQualifiers.spacing = EvsFractionalOdd;
7199 warn(loc, "ignored", id.c_str(), "");
7200 return;
7201 }
7202
7203 // triangle order
7204 if (id == TQualifier::getVertexOrderString(EvoCw)) {
7205 // publicType.shaderQualifiers.order = EvoCw;
7206 warn(loc, "ignored", id.c_str(), "");
7207 return;
7208 }
7209 if (id == TQualifier::getVertexOrderString(EvoCcw)) {
7210 // publicType.shaderQualifiers.order = EvoCcw;
7211 warn(loc, "ignored", id.c_str(), "");
7212 return;
7213 }
7214
7215 // point mode
7216 if (id == "point_mode") {
7217 // publicType.shaderQualifiers.pointMode = true;
7218 warn(loc, "ignored", id.c_str(), "");
7219 return;
7220 }
7221 }
7222 }
7223 if (language == EShLangFragment) {
7224 if (id == "origin_upper_left") {
7225 // publicType.shaderQualifiers.originUpperLeft = true;
7226 warn(loc, "ignored", id.c_str(), "");
7227 return;
7228 }
7229 if (id == "pixel_center_integer") {
7230 // publicType.shaderQualifiers.pixelCenterInteger = true;
7231 warn(loc, "ignored", id.c_str(), "");
7232 return;
7233 }
7234 if (id == "early_fragment_tests") {
7235 // publicType.shaderQualifiers.earlyFragmentTests = true;
7236 warn(loc, "ignored", id.c_str(), "");
7237 return;
7238 }
7239 for (TLayoutDepth depth = (TLayoutDepth)(EldNone + 1); depth < EldCount; depth = (TLayoutDepth)(depth + 1)) {
7240 if (id == TQualifier::getLayoutDepthString(depth)) {
7241 // publicType.shaderQualifiers.layoutDepth = depth;
7242 warn(loc, "ignored", id.c_str(), "");
7243 return;
7244 }
7245 }
7246 if (id.compare(0, 13, "blend_support") == 0) {
7247 bool found = false;
7248 for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) {
7249 if (id == TQualifier::getBlendEquationString(be)) {
7250 requireExtensions(loc, 1, &E_GL_KHR_blend_equation_advanced, "blend equation");
7251 intermediate.addBlendEquation(be);
7252 // publicType.shaderQualifiers.blendEquation = true;
7253 warn(loc, "ignored", id.c_str(), "");
7254 found = true;
7255 break;
7256 }
7257 }
7258 if (! found)
7259 error(loc, "unknown blend equation", "blend_support", "");
7260 return;
7261 }
7262 }
7263 error(loc, "unrecognized layout identifier, or qualifier requires assignment (e.g., binding = 4)", id.c_str(), "");
7264 }
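
// Source-level trigger (hypothetical HLSL): glslang routes Vulkan-style
// attributes through this function, so e.g. "[[vk::push_constant]]" arrives
// as id == "push_constant" and sets qualifier.layoutPushConstant above.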
7265
7266 // Put the id's layout qualifier value into the qualifier, for qualifiers having a number set.
7267 // This is before we know any type information for error checking.
7268 void HlslParseContext::setLayoutQualifier(const TSourceLoc& loc, TQualifier& qualifier, TString& id,
7269 const TIntermTyped* node)
7270 {
7271 const char* feature = "layout-id value";
7272 // const char* nonLiteralFeature = "non-literal layout-id value";
7273
7274 integerCheck(node, feature);
7275 const TIntermConstantUnion* constUnion = node->getAsConstantUnion();
7276 int value = 0;
7277 if (constUnion) {
7278 value = constUnion->getConstArray()[0].getIConst();
7279 }
7280
7281 std::transform(id.begin(), id.end(), id.begin(), ::tolower);
7282
7283 if (id == "offset") {
7284 qualifier.layoutOffset = value;
7285 return;
7286 } else if (id == "align") {
7287 // "The specified alignment must be a power of 2, or a compile-time error results."
7288 if (! IsPow2(value))
7289 error(loc, "must be a power of 2", "align", "");
7290 else
7291 qualifier.layoutAlign = value;
7292 return;
7293 } else if (id == "location") {
7294 if ((unsigned int)value >= TQualifier::layoutLocationEnd)
7295 error(loc, "location is too large", id.c_str(), "");
7296 else
7297 qualifier.layoutLocation = value;
7298 return;
7299 } else if (id == "set") {
7300 if ((unsigned int)value >= TQualifier::layoutSetEnd)
7301 error(loc, "set is too large", id.c_str(), "");
7302 else
7303 qualifier.layoutSet = value;
7304 return;
7305 } else if (id == "binding") {
7306 if ((unsigned int)value >= TQualifier::layoutBindingEnd)
7307 error(loc, "binding is too large", id.c_str(), "");
7308 else
7309 qualifier.layoutBinding = value;
7310 return;
7311 } else if (id == "component") {
7312 if ((unsigned)value >= TQualifier::layoutComponentEnd)
7313 error(loc, "component is too large", id.c_str(), "");
7314 else
7315 qualifier.layoutComponent = value;
7316 return;
7317 } else if (id.compare(0, 4, "xfb_") == 0) {
7318 // "Any shader making any static use (after preprocessing) of any of these
7319 // *xfb_* qualifiers will cause the shader to be in a transform feedback
7320 // capturing mode and hence responsible for describing the transform feedback
7321 // setup."
7322 intermediate.setXfbMode();
7323 if (id == "xfb_buffer") {
7324 // "It is a compile-time error to specify an *xfb_buffer* that is greater than
7325 // the implementation-dependent constant gl_MaxTransformFeedbackBuffers."
7326 if (value >= resources.maxTransformFeedbackBuffers)
7327 error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d",
7328 resources.maxTransformFeedbackBuffers);
7329 if (value >= (int)TQualifier::layoutXfbBufferEnd)
7330 error(loc, "buffer is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbBufferEnd - 1);
7331 else
7332 qualifier.layoutXfbBuffer = value;
7333 return;
7334 } else if (id == "xfb_offset") {
7335 if (value >= (int)TQualifier::layoutXfbOffsetEnd)
7336 error(loc, "offset is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbOffsetEnd - 1);
7337 else
7338 qualifier.layoutXfbOffset = value;
7339 return;
7340 } else if (id == "xfb_stride") {
7341 // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
7342 // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
7343 if (value > 4 * resources.maxTransformFeedbackInterleavedComponents)
7344 error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d",
7345 resources.maxTransformFeedbackInterleavedComponents);
7346 else if (value >= (int)TQualifier::layoutXfbStrideEnd)
7347 error(loc, "stride is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbStrideEnd - 1);
7348 if (value < (int)TQualifier::layoutXfbStrideEnd)
7349 qualifier.layoutXfbStride = value;
7350 return;
7351 }
7352 }
7353
7354 if (id == "input_attachment_index") {
7355 requireVulkan(loc, "input_attachment_index");
7356 if (value >= (int)TQualifier::layoutAttachmentEnd)
7357 error(loc, "attachment index is too large", id.c_str(), "");
7358 else
7359 qualifier.layoutAttachment = value;
7360 return;
7361 }
7362 if (id == "constant_id") {
7363 setSpecConstantId(loc, qualifier, value);
7364 return;
7365 }
7366
7367 switch (language) {
7368 case EShLangVertex:
7369 break;
7370
7371 case EShLangTessControl:
7372 if (id == "vertices") {
7373 if (value == 0)
7374 error(loc, "must be greater than 0", "vertices", "");
7375 else
7376 // publicType.shaderQualifiers.vertices = value;
7377 warn(loc, "ignored", id.c_str(), "");
7378 return;
7379 }
7380 break;
7381
7382 case EShLangTessEvaluation:
7383 break;
7384
7385 case EShLangGeometry:
7386 if (id == "invocations") {
7387 if (value == 0)
7388 error(loc, "must be at least 1", "invocations", "");
7389 else
7390 // publicType.shaderQualifiers.invocations = value;
7391 warn(loc, "ignored", id.c_str(), "");
7392 return;
7393 }
7394 if (id == "max_vertices") {
7395 // publicType.shaderQualifiers.vertices = value;
7396 warn(loc, "ignored", id.c_str(), "");
7397 if (value > resources.maxGeometryOutputVertices)
7398 error(loc, "too large, must be less than gl_MaxGeometryOutputVertices", "max_vertices", "");
7399 return;
7400 }
7401 if (id == "stream") {
7402 qualifier.layoutStream = value;
7403 return;
7404 }
7405 break;
7406
7407 case EShLangFragment:
7408 if (id == "index") {
7409 qualifier.layoutIndex = value;
7410 return;
7411 }
7412 break;
7413
7414 case EShLangCompute:
7415 if (id.compare(0, 11, "local_size_") == 0) {
7416 if (id == "local_size_x") {
7417 // publicType.shaderQualifiers.localSize[0] = value;
7418 warn(loc, "ignored", id.c_str(), "");
7419 return;
7420 }
7421 if (id == "local_size_y") {
7422 // publicType.shaderQualifiers.localSize[1] = value;
7423 warn(loc, "ignored", id.c_str(), "");
7424 return;
7425 }
7426 if (id == "local_size_z") {
7427 // publicType.shaderQualifiers.localSize[2] = value;
7428 warn(loc, "ignored", id.c_str(), "");
7429 return;
7430 }
7431 if (spvVersion.spv != 0) {
7432 if (id == "local_size_x_id") {
7433 // publicType.shaderQualifiers.localSizeSpecId[0] = value;
7434 warn(loc, "ignored", id.c_str(), "");
7435 return;
7436 }
7437 if (id == "local_size_y_id") {
7438 // publicType.shaderQualifiers.localSizeSpecId[1] = value;
7439 warn(loc, "ignored", id.c_str(), "");
7440 return;
7441 }
7442 if (id == "local_size_z_id") {
7443 // publicType.shaderQualifiers.localSizeSpecId[2] = value;
7444 warn(loc, "ignored", id.c_str(), "");
7445 return;
7446 }
7447 }
7448 }
7449 break;
7450
7451 default:
7452 break;
7453 }
7454
7455 error(loc, "there is no such layout identifier for this stage taking an assigned value", id.c_str(), "");
7456 }
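
// Example (hypothetical HLSL): a binding attribute such as
// "[[vk::binding(2, 1)]]" is ultimately expressed as two id/value pairs
// handled above: "binding" with value 2 (qualifier.layoutBinding = 2) and
// "set" with value 1 (qualifier.layoutSet = 1).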
7457
7458 void HlslParseContext::setSpecConstantId(const TSourceLoc& loc, TQualifier& qualifier, int value)
7459 {
7460 if (value >= (int)TQualifier::layoutSpecConstantIdEnd) {
7461 error(loc, "specialization-constant id is too large", "constant_id", "");
7462 } else {
7463 qualifier.layoutSpecConstantId = value;
7464 qualifier.specConstant = true;
7465 if (! intermediate.addUsedConstantId(value))
7466 error(loc, "specialization-constant id already used", "constant_id", "");
7467 }
7468 return;
7469 }
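
// Example (hypothetical HLSL): "[[vk::constant_id(3)]] const int N = 8;"
// lands here with value 3, marking N as a specialization constant; a second
// declaration reusing id 3 fails addUsedConstantId() and reports
// "specialization-constant id already used".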
7470
7471 // Merge any layout qualifier information from src into dst, leaving everything else in dst alone
7472 //
7473 // "More than one layout qualifier may appear in a single declaration.
7474 // Additionally, the same layout-qualifier-name can occur multiple times
7475 // within a layout qualifier or across multiple layout qualifiers in the
7476 // same declaration. When the same layout-qualifier-name occurs
7477 // multiple times, in a single declaration, the last occurrence overrides
7478 // the former occurrence(s). Further, if such a layout-qualifier-name
7479 // will effect subsequent declarations or other observable behavior, it
7480 // is only the last occurrence that will have any effect, behaving as if
7481 // the earlier occurrence(s) within the declaration are not present.
7482 // This is also true for overriding layout-qualifier-names, where one
7483 // overrides the other (e.g., row_major vs. column_major); only the last
7484 // occurrence has any effect."
7485 //
7486 void HlslParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly)
7487 {
7488 if (src.hasMatrix())
7489 dst.layoutMatrix = src.layoutMatrix;
7490 if (src.hasPacking())
7491 dst.layoutPacking = src.layoutPacking;
7492
7493 if (src.hasStream())
7494 dst.layoutStream = src.layoutStream;
7495
7496 if (src.hasFormat())
7497 dst.layoutFormat = src.layoutFormat;
7498
7499 if (src.hasXfbBuffer())
7500 dst.layoutXfbBuffer = src.layoutXfbBuffer;
7501
7502 if (src.hasAlign())
7503 dst.layoutAlign = src.layoutAlign;
7504
7505 if (! inheritOnly) {
7506 if (src.hasLocation())
7507 dst.layoutLocation = src.layoutLocation;
7508 if (src.hasComponent())
7509 dst.layoutComponent = src.layoutComponent;
7510 if (src.hasIndex())
7511 dst.layoutIndex = src.layoutIndex;
7512
7513 if (src.hasOffset())
7514 dst.layoutOffset = src.layoutOffset;
7515
7516 if (src.hasSet())
7517 dst.layoutSet = src.layoutSet;
7518 if (src.layoutBinding != TQualifier::layoutBindingEnd)
7519 dst.layoutBinding = src.layoutBinding;
7520
7521 if (src.hasXfbStride())
7522 dst.layoutXfbStride = src.layoutXfbStride;
7523 if (src.hasXfbOffset())
7524 dst.layoutXfbOffset = src.layoutXfbOffset;
7525 if (src.hasAttachment())
7526 dst.layoutAttachment = src.layoutAttachment;
7527 if (src.hasSpecConstantId())
7528 dst.layoutSpecConstantId = src.layoutSpecConstantId;
7529
7530 if (src.layoutPushConstant)
7531 dst.layoutPushConstant = true;
7532 }
7533 }
7534
7535
7536 //
7537 // Look up a function name in the symbol table, and make sure it is a function.
7538 //
7539 // First, look for an exact match. If there is none, use the generic selector
7540 // TParseContextBase::selectFunction() to find one, parameterized by the
7541 // convertible() and better() predicates defined below.
7542 //
7543 // Return the function symbol if found, otherwise nullptr.
7544 //
7545 const TFunction* HlslParseContext::findFunction(const TSourceLoc& loc, TFunction& call, bool& builtIn, int& thisDepth,
7546 TIntermTyped*& args)
7547 {
7548 if (symbolTable.isFunctionNameVariable(call.getName())) {
7549 error(loc, "can't use function syntax on variable", call.getName().c_str(), "");
7550 return nullptr;
7551 }
7552
7553 // first, look for an exact match
7554 bool dummyScope;
7555 TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn, &dummyScope, &thisDepth);
7556 if (symbol)
7557 return symbol->getAsFunction();
7558
7559 // no exact match, use the generic selector, parameterized by the GLSL rules
7560
7561 // create list of candidates to send
7562 TVector<const TFunction*> candidateList;
7563 symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
7564
7565 // These built-in ops can accept any type, so we bypass the argument selection
7566 if (candidateList.size() == 1 && builtIn &&
7567 (candidateList[0]->getBuiltInOp() == EOpMethodAppend ||
7568 candidateList[0]->getBuiltInOp() == EOpMethodRestartStrip ||
7569 candidateList[0]->getBuiltInOp() == EOpMethodIncrementCounter ||
7570 candidateList[0]->getBuiltInOp() == EOpMethodDecrementCounter ||
7571 candidateList[0]->getBuiltInOp() == EOpMethodConsume)) {
7572 return candidateList[0];
7573 }
7574
7575 bool allowOnlyUpConversions = true;
7576
7577 // can 'from' convert to 'to'?
7578 const auto convertible = [&](const TType& from, const TType& to, TOperator op, int arg) -> bool {
7579 if (from == to)
7580 return true;
7581
7582 // no aggregate conversions
7583 if (from.isArray() || to.isArray() ||
7584 from.isStruct() || to.isStruct())
7585 return false;
7586
7587 switch (op) {
7588 case EOpInterlockedAdd:
7589 case EOpInterlockedAnd:
7590 case EOpInterlockedCompareExchange:
7591 case EOpInterlockedCompareStore:
7592 case EOpInterlockedExchange:
7593 case EOpInterlockedMax:
7594 case EOpInterlockedMin:
7595 case EOpInterlockedOr:
7596 case EOpInterlockedXor:
7597 // We do not promote the texture or image type for these opcodes. Normally that would not
7598 // be an issue because it's a buffer, but we haven't decomposed the opcode yet, and at this
7599 // stage it's merely e.g., a basic integer type.
7600 //
7601 // Instead, we want to promote other arguments, but stay within the same family. In other
7602 // words, InterlockedAdd(RWBuffer<int>, ...) will always use the int flavor, never the uint flavor,
7603 // but it is allowed to promote its other arguments.
7604 if (arg == 0)
7605 return false;
7606 break;
7607 case EOpMethodSample:
7608 case EOpMethodSampleBias:
7609 case EOpMethodSampleCmp:
7610 case EOpMethodSampleCmpLevelZero:
7611 case EOpMethodSampleGrad:
7612 case EOpMethodSampleLevel:
7613 case EOpMethodLoad:
7614 case EOpMethodGetDimensions:
7615 case EOpMethodGetSamplePosition:
7616 case EOpMethodGather:
7617 case EOpMethodCalculateLevelOfDetail:
7618 case EOpMethodCalculateLevelOfDetailUnclamped:
7619 case EOpMethodGatherRed:
7620 case EOpMethodGatherGreen:
7621 case EOpMethodGatherBlue:
7622 case EOpMethodGatherAlpha:
7623 case EOpMethodGatherCmp:
7624 case EOpMethodGatherCmpRed:
7625 case EOpMethodGatherCmpGreen:
7626 case EOpMethodGatherCmpBlue:
7627 case EOpMethodGatherCmpAlpha:
7628 case EOpMethodAppend:
7629 case EOpMethodRestartStrip:
7630 // These are method calls; the object type cannot be changed.
7631 // They are equal if the dim and type match (is dim sufficient?).
7632 if (arg == 0)
7633 return from.getSampler().type == to.getSampler().type &&
7634 from.getSampler().arrayed == to.getSampler().arrayed &&
7635 from.getSampler().shadow == to.getSampler().shadow &&
7636 from.getSampler().ms == to.getSampler().ms &&
7637 from.getSampler().dim == to.getSampler().dim;
7638 break;
7639 default:
7640 break;
7641 }
7642
7643 // basic types have to be convertible
7644 if (allowOnlyUpConversions)
7645 if (! intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType(), EOpFunctionCall))
7646 return false;
7647
7648 // shapes have to be convertible
7649 if ((from.isScalarOrVec1() && to.isScalarOrVec1()) ||
7650 (from.isScalarOrVec1() && to.isVector()) ||
7651 (from.isScalarOrVec1() && to.isMatrix()) ||
7652 (from.isVector() && to.isVector() && from.getVectorSize() >= to.getVectorSize()))
7653 return true;
7654
7655 // TODO: what are the matrix rules? they go here
7656
7657 return false;
7658 };
7659
7660 // Is 'to2' a better conversion than 'to1'?
7661 // Ties should not be considered as better.
7662 // Assumes 'convertible' already said true.
7663 const auto better = [](const TType& from, const TType& to1, const TType& to2) -> bool {
7664 // exact match is always better than mismatch
7665 if (from == to2)
7666 return from != to1;
7667 if (from == to1)
7668 return false;
7669
7670 // shape changes are always worse
7671 if (from.isScalar() || from.isVector()) {
7672 if (from.getVectorSize() == to2.getVectorSize() &&
7673 from.getVectorSize() != to1.getVectorSize())
7674 return true;
7675 if (from.getVectorSize() == to1.getVectorSize() &&
7676 from.getVectorSize() != to2.getVectorSize())
7677 return false;
7678 }
7679
7680 // Handle sampler betterness: An exact sampler match beats a non-exact match.
7681 // (If we just looked at basic type, all EbtSamplers would look the same).
7682 // If any type is not a sampler, just use the linearize function below.
7683 if (from.getBasicType() == EbtSampler && to1.getBasicType() == EbtSampler && to2.getBasicType() == EbtSampler) {
7684 // We can ignore the vector size in the comparison.
7685 TSampler to1Sampler = to1.getSampler();
7686 TSampler to2Sampler = to2.getSampler();
7687
7688 to1Sampler.vectorSize = to2Sampler.vectorSize = from.getSampler().vectorSize;
7689
7690 if (from.getSampler() == to2Sampler)
7691 return from.getSampler() != to1Sampler;
7692 if (from.getSampler() == to1Sampler)
7693 return false;
7694 }
7695
7696 // Might or might not be changing shape, which means basic type might
7697 // or might not match, so within that, the question is how big a
7698 // basic-type conversion is being done.
7699 //
7700 // Use a hierarchy of domains, translated to order of magnitude
7701 // in a linearized view:
7702 // - floating-point vs. integer
7703 // - 32 vs. 64 bit (or width in general)
7704 // - bool vs. non bool
7705 // - signed vs. not signed
7706 const auto linearize = [](const TBasicType& basicType) -> int {
7707 switch (basicType) {
7708 case EbtBool: return 1;
7709 case EbtInt: return 10;
7710 case EbtUint: return 11;
7711 case EbtInt64: return 20;
7712 case EbtUint64: return 21;
7713 case EbtFloat: return 100;
7714 case EbtDouble: return 110;
7715 default: return 0;
7716 }
7717 };
7718
7719 return abs(linearize(to2.getBasicType()) - linearize(from.getBasicType())) <
7720 abs(linearize(to1.getBasicType()) - linearize(from.getBasicType()));
7721 };
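
// Worked example of the linearized distance above: for an int argument,
// a uint candidate scores |11 - 10| = 1 while a float candidate scores
// |100 - 10| = 90, so the uint overload is judged the better conversion.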
7722
7723 // for ambiguity reporting
7724 bool tie = false;
7725
7726 // send to the generic selector
7727 const TFunction* bestMatch = nullptr;
7728
7729 // printf has var args and is in the symbol table as "printf()",
7730 // mangled to "printf("
7731 if (call.getName() == "printf") {
7732 TSymbol* symbol = symbolTable.find("printf(", &builtIn);
7733 if (symbol)
7734 return symbol->getAsFunction();
7735 }
7736
7737 bestMatch = selectFunction(candidateList, call, convertible, better, tie);
7738
7739 if (bestMatch == nullptr) {
7740 // If there is nothing selected by allowing only up-conversions (to a larger linearize() value),
7741 // we instead try down-conversions, which are valid in HLSL, but not preferred if there are any
7742 // upconversions possible.
7743 allowOnlyUpConversions = false;
7744 bestMatch = selectFunction(candidateList, call, convertible, better, tie);
7745 }
7746
7747 if (bestMatch == nullptr) {
7748 error(loc, "no matching overloaded function found", call.getName().c_str(), "");
7749 return nullptr;
7750 }
7751
7752 // For built-ins, we can convert across the arguments. This will happen in several steps:
7753 // Step 1: If there's an exact match, use it.
7754 // Step 2a: Otherwise, get the operator from the best match and promote arguments:
7755 // Step 2b: reconstruct the TFunction based on the new arg types
7756 // Step 3: Re-select after type promotion is applied, to find proper candidate.
7757 if (builtIn) {
7758 // Step 1: If there's an exact match, use it.
7759 if (call.getMangledName() == bestMatch->getMangledName())
7760 return bestMatch;
7761
7762 // Step 2a: Otherwise, get the operator from the best match and promote arguments as if we
7763 // are that kind of operator.
7764 if (args != nullptr) {
7765 // The arg list can be a unary node, or an aggregate. We have to handle both.
7766 // We will use the normal promote() facilities, which require an interm node.
7767 TIntermOperator* promote = nullptr;
7768
7769 if (call.getParamCount() == 1) {
7770 promote = new TIntermUnary(bestMatch->getBuiltInOp());
7771 promote->getAsUnaryNode()->setOperand(args->getAsTyped());
7772 } else {
7773 promote = new TIntermAggregate(bestMatch->getBuiltInOp());
7774 promote->getAsAggregate()->getSequence().swap(args->getAsAggregate()->getSequence());
7775 }
7776
7777 if (! intermediate.promote(promote))
7778 return nullptr;
7779
7780 // Obtain the promoted arg list.
7781 if (call.getParamCount() == 1) {
7782 args = promote->getAsUnaryNode()->getOperand();
7783 } else {
7784 promote->getAsAggregate()->getSequence().swap(args->getAsAggregate()->getSequence());
7785 }
7786 }
7787
7788 // Step 2b: reconstruct the TFunction based on the new arg types
7789 TFunction convertedCall(&call.getName(), call.getType(), call.getBuiltInOp());
7790
7791 if (args->getAsAggregate()) {
7792 // Handle aggregates: put all args into the new function call
7793 for (int arg = 0; arg < int(args->getAsAggregate()->getSequence().size()); ++arg) {
7794 // TODO: But for constness, we could avoid the new & shallowCopy, and use the pointer directly.
7795 TParameter param = { nullptr, new TType, nullptr };
7796 param.type->shallowCopy(args->getAsAggregate()->getSequence()[arg]->getAsTyped()->getType());
7797 convertedCall.addParameter(param);
7798 }
7799 } else if (args->getAsUnaryNode()) {
7800 // Handle unaries: put all args into the new function call
7801 TParameter param = { nullptr, new TType, nullptr };
7802 param.type->shallowCopy(args->getAsUnaryNode()->getOperand()->getAsTyped()->getType());
7803 convertedCall.addParameter(param);
7804 } else if (args->getAsTyped()) {
7805 // Handle bare values, e.g., floats, not in an aggregate.
7806 TParameter param = { nullptr, new TType, nullptr };
7807 param.type->shallowCopy(args->getAsTyped()->getType());
7808 convertedCall.addParameter(param);
7809 } else {
7810 assert(0); // unknown argument list.
7811 return nullptr;
7812 }
7813
7814 // Step 3: Re-select after type promotion, to find proper candidate
7815 // send to the generic selector
7816 bestMatch = selectFunction(candidateList, convertedCall, convertible, better, tie);
7817
7818 // At this point, there should be no tie.
7819 }
7820
7821 if (tie)
7822 error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
7823
7824 // Append default parameter values if needed
7825 if (!tie && bestMatch != nullptr) {
7826 for (int defParam = call.getParamCount(); defParam < bestMatch->getParamCount(); ++defParam) {
7827 handleFunctionArgument(&call, args, (*bestMatch)[defParam].defaultValue);
7828 }
7829 }
7830
7831 return bestMatch;
7832 }
7833
7834 //
7835 // Do everything necessary to handle a typedef declaration, for a single symbol.
7836 //
7837 // 'parseType' is the type part of the declaration (to the left)
7838 // 'arraySizes' is the arrayness tagged on the identifier (to the right)
7839 //
7840 void HlslParseContext::declareTypedef(const TSourceLoc& loc, const TString& identifier, const TType& parseType)
7841 {
7842 TVariable* typeSymbol = new TVariable(&identifier, parseType, true);
7843 if (! symbolTable.insert(*typeSymbol))
7844 error(loc, "name already defined", "typedef", identifier.c_str());
7845 }
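
// E.g., "typedef float4 color_t;" (hypothetical HLSL) inserts a TVariable
// flagged as a user type under the name "color_t"; a second definition of
// "color_t" in the same scope fails the insert and reports
// "name already defined".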
7846
7847 // Do everything necessary to handle a struct declaration, including
7848 // making IO aliases because HLSL allows mixed IO in a struct that specializes
7849 // based on the usage (input, output, uniform, none).
7850 void HlslParseContext::declareStruct(const TSourceLoc& loc, TString& structName, TType& type)
7851 {
7852 // If it was named, which means the type can be reused later, add
7853 // it to the symbol table. (Unless it's a block, in which
7854 // case the name is not a type.)
7855 if (type.getBasicType() == EbtBlock || structName.size() == 0)
7856 return;
7857
7858 TVariable* userTypeDef = new TVariable(&structName, type, true);
7859 if (! symbolTable.insert(*userTypeDef)) {
7860 error(loc, "redefinition", structName.c_str(), "struct");
7861 return;
7862 }
7863
7864 // See if we need IO aliases for the structure typeList
7865
7866 const auto condAlloc = [](bool pred, TTypeList*& list) {
7867 if (pred && list == nullptr)
7868 list = new TTypeList;
7869 };
7870
7871 tIoKinds newLists = { nullptr, nullptr, nullptr }; // allocate for each kind found
7872 for (auto member = type.getStruct()->begin(); member != type.getStruct()->end(); ++member) {
7873 condAlloc(hasUniform(member->type->getQualifier()), newLists.uniform);
7874 condAlloc( hasInput(member->type->getQualifier()), newLists.input);
7875 condAlloc( hasOutput(member->type->getQualifier()), newLists.output);
7876
7877 if (member->type->isStruct()) {
7878 auto it = ioTypeMap.find(member->type->getStruct());
7879 if (it != ioTypeMap.end()) {
7880 condAlloc(it->second.uniform != nullptr, newLists.uniform);
7881 condAlloc(it->second.input != nullptr, newLists.input);
7882 condAlloc(it->second.output != nullptr, newLists.output);
7883 }
7884 }
7885 }
7886 if (newLists.uniform == nullptr &&
7887 newLists.input == nullptr &&
7888 newLists.output == nullptr) {
7889 // Won't do any IO caching, clear up the type and get out now.
7890 for (auto member = type.getStruct()->begin(); member != type.getStruct()->end(); ++member)
7891 clearUniformInputOutput(member->type->getQualifier());
7892 return;
7893 }
7894
7895 // We have IO involved.
7896
7897 // Make a pure typeList for the symbol table, and cache side copies of IO versions.
7898 for (auto member = type.getStruct()->begin(); member != type.getStruct()->end(); ++member) {
7899 const auto inheritStruct = [&](TTypeList* s, TTypeLoc& ioMember) {
7900 if (s != nullptr) {
7901 ioMember.type = new TType;
7902 ioMember.type->shallowCopy(*member->type);
7903 ioMember.type->setStruct(s);
7904 }
7905 };
7906 const auto newMember = [&](TTypeLoc& m) {
7907 if (m.type == nullptr) {
7908 m.type = new TType;
7909 m.type->shallowCopy(*member->type);
7910 }
7911 };
7912
7913 TTypeLoc newUniformMember = { nullptr, member->loc };
7914 TTypeLoc newInputMember = { nullptr, member->loc };
7915 TTypeLoc newOutputMember = { nullptr, member->loc };
7916 if (member->type->isStruct()) {
7917 // swap in an IO child if there is one
7918 auto it = ioTypeMap.find(member->type->getStruct());
7919 if (it != ioTypeMap.end()) {
7920 inheritStruct(it->second.uniform, newUniformMember);
7921 inheritStruct(it->second.input, newInputMember);
7922 inheritStruct(it->second.output, newOutputMember);
7923 }
7924 }
7925 if (newLists.uniform) {
7926 newMember(newUniformMember);
7927
7928 // inherit default matrix layout (changeable via #pragma pack_matrix), if none given.
7929 if (member->type->isMatrix() && member->type->getQualifier().layoutMatrix == ElmNone)
7930 newUniformMember.type->getQualifier().layoutMatrix = globalUniformDefaults.layoutMatrix;
7931
7932 correctUniform(newUniformMember.type->getQualifier());
7933 newLists.uniform->push_back(newUniformMember);
7934 }
7935 if (newLists.input) {
7936 newMember(newInputMember);
7937 correctInput(newInputMember.type->getQualifier());
7938 newLists.input->push_back(newInputMember);
7939 }
7940 if (newLists.output) {
7941 newMember(newOutputMember);
7942 correctOutput(newOutputMember.type->getQualifier());
7943 newLists.output->push_back(newOutputMember);
7944 }
7945
7946 // make original pure
7947 clearUniformInputOutput(member->type->getQualifier());
7948 }
7949 ioTypeMap[type.getStruct()] = newLists;
7950 }
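
// Illustration (hypothetical HLSL): a struct used for both stage input and
// stage output, e.g.
//     struct VSOut { float4 pos : SV_Position; float2 uv : TEXCOORD0; };
// gets side copies cached in ioTypeMap for each IO kind present, with member
// qualifiers corrected per usage, while the symbol-table version is made
// "pure" by clearing uniform/input/output from its members.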
7951
7952 // Lookup a user-type by name.
7953 // If found, fill in the type and return the defining symbol.
7954 // If not found, return nullptr.
7955 TSymbol* HlslParseContext::lookupUserType(const TString& typeName, TType& type)
7956 {
7957 TSymbol* symbol = symbolTable.find(typeName);
7958 if (symbol && symbol->getAsVariable() && symbol->getAsVariable()->isUserType()) {
7959 type.shallowCopy(symbol->getType());
7960 return symbol;
7961 } else
7962 return nullptr;
7963 }
7964
7965 //
7966 // Do everything necessary to handle a variable (non-block) declaration.
7967 // Either redeclaring a variable, or making a new one, updating the symbol
7968 // table, and all error checking.
7969 //
7970 // Returns a subtree node that computes an initializer, if needed.
7971 // Returns nullptr if there is no code to execute for initialization.
7972 //
7973 // 'parseType' is the type part of the declaration (to the left)
7974 // 'arraySizes' is the arrayness tagged on the identifier (to the right)
7975 //
7976 TIntermNode* HlslParseContext::declareVariable(const TSourceLoc& loc, const TString& identifier, TType& type,
7977 TIntermTyped* initializer)
7978 {
7979 if (voidErrorCheck(loc, identifier, type.getBasicType()))
7980 return nullptr;
7981
7982 // Global consts with initializers that are non-const act like EvqGlobal in HLSL.
7983 // This test is implicitly recursive, because initializers propagate constness
7984 // up the aggregate node tree during creation. E.g., for:
7985 // { { 1, 2 }, { 3, 4 } }
7986 // the initializer list is marked EvqConst at the top node, and remains so here. However:
7987 // { 1, { myvar, 2 }, 3 }
7988 // is not a const initializer, and still becomes EvqGlobal here.
7989
7990 const bool nonConstInitializer = (initializer != nullptr && initializer->getQualifier().storage != EvqConst);
7991
7992 if (type.getQualifier().storage == EvqConst && symbolTable.atGlobalLevel() && nonConstInitializer) {
7993 // Force to global
7994 type.getQualifier().storage = EvqGlobal;
7995 }
7996
7997 // make const and initialization consistent
7998 fixConstInit(loc, identifier, type, initializer);
7999
8000 // Check for redeclaration of built-ins and/or attempting to declare a reserved name
8001 TSymbol* symbol = nullptr;
8002
8003 inheritGlobalDefaults(type.getQualifier());
8004
8005 const bool flattenVar = shouldFlatten(type, type.getQualifier().storage, true);
8006
8007 // correct IO in the type
8008 switch (type.getQualifier().storage) {
8009 case EvqGlobal:
8010 case EvqTemporary:
8011 clearUniformInputOutput(type.getQualifier());
8012 break;
8013 case EvqUniform:
8014 case EvqBuffer:
8015 correctUniform(type.getQualifier());
8016 if (type.isStruct()) {
8017 auto it = ioTypeMap.find(type.getStruct());
8018 if (it != ioTypeMap.end())
8019 type.setStruct(it->second.uniform);
8020 }
8021
8022 break;
8023 default:
8024 break;
8025 }
8026
8027 // Declare the variable
8028 if (type.isArray()) {
8029 // array case
8030 declareArray(loc, identifier, type, symbol, !flattenVar);
8031 } else {
8032 // non-array case
8033 if (symbol == nullptr)
8034 symbol = declareNonArray(loc, identifier, type, !flattenVar);
8035 else if (type != symbol->getType())
8036 error(loc, "cannot change the type of", "redeclaration", symbol->getName().c_str());
8037 }
8038
8039 if (symbol == nullptr)
8040 return nullptr;
8041
8042 if (flattenVar)
8043 flatten(*symbol->getAsVariable(), symbolTable.atGlobalLevel());
8044
8045 TVariable* variable = symbol->getAsVariable();
8046
8047 if (initializer == nullptr) {
8048 if (intermediate.getDebugInfo())
8049 return executeDeclaration(loc, variable);
8050 else
8051 return nullptr;
8052 }
8053
8054 // Deal with initializer
8055 if (variable == nullptr) {
8056 error(loc, "initializer requires a variable, not a member", identifier.c_str(), "");
8057 return nullptr;
8058 }
8059 return executeInitializer(loc, initializer, variable);
8060 }
8061
8062 // Pick up the applicable global defaults and copy them into 'dst'.
8063 void HlslParseContext::inheritGlobalDefaults(TQualifier& dst) const
8064 {
8065 if (dst.storage == EvqVaryingOut) {
8066 if (! dst.hasStream() && language == EShLangGeometry)
8067 dst.layoutStream = globalOutputDefaults.layoutStream;
8068 if (! dst.hasXfbBuffer())
8069 dst.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
8070 }
8071 }
8072
8073 //
8074 // Make an internal-only variable whose name is for debug purposes only
8075 // and won't be searched for. Callers will access the variable through the
8076 // returned pointer, never by looking up its name. It is okay if the name
8077 // is the same as other names; there won't be any conflict.
8078 //
8079 TVariable* HlslParseContext::makeInternalVariable(const char* name, const TType& type) const
8080 {
8081 TString* nameString = NewPoolTString(name);
8082 TVariable* variable = new TVariable(nameString, type);
8083 symbolTable.makeInternalVariable(*variable);
8084
8085 return variable;
8086 }
8087
8088 // Make a symbol node holding a new internal temporary variable.
8089 TIntermSymbol* HlslParseContext::makeInternalVariableNode(const TSourceLoc& loc, const char* name,
8090 const TType& type) const
8091 {
8092 TVariable* tmpVar = makeInternalVariable(name, type);
8093 tmpVar->getWritableType().getQualifier().makeTemporary();
8094
8095 return intermediate.addSymbol(*tmpVar, loc);
8096 }
8097
8098 //
8099 // Declare a non-array variable; the main point being that no redeclaration
8100 // for resizing is allowed.
8101 //
8102 // Return the successfully declared variable.
8103 //
8104 TVariable* HlslParseContext::declareNonArray(const TSourceLoc& loc, const TString& identifier, const TType& type,
8105 bool track)
8106 {
8107 // make a new variable
8108 TVariable* variable = new TVariable(&identifier, type);
8109
8110 // add variable to symbol table
8111 if (symbolTable.insert(*variable)) {
8112 if (track && symbolTable.atGlobalLevel())
8113 trackLinkage(*variable);
8114 return variable;
8115 }
8116
8117 error(loc, "redefinition", variable->getName().c_str(), "");
8118 return nullptr;
8119 }
8120
8121 // Return a declaration of a temporary variable
8122 //
8123 // This is used to force a variable to be declared in the correct scope
8124 // when debug information is being generated.
8125
8126 TIntermNode* HlslParseContext::executeDeclaration(const TSourceLoc& loc, TVariable* variable)
8127 {
8128 //
8129 // Identifier must be of type temporary.
8130 //
8131 TStorageQualifier qualifier = variable->getType().getQualifier().storage;
8132 if (qualifier != EvqTemporary)
8133 return nullptr;
8134
8135 TIntermSymbol* intermSymbol = intermediate.addSymbol(*variable, loc);
8136 return handleDeclare(loc, intermSymbol);
8137 }
8138
8139 //
8140 // Handle all types of initializers from the grammar.
8141 //
8142 // Returning nullptr just means there is no code to execute to handle the
8143 // initializer, which will, for example, be the case for constant initializers.
8144 //
8145 // Returns a subtree that accomplished the initialization.
8146 //
8147 TIntermNode* HlslParseContext::executeInitializer(const TSourceLoc& loc, TIntermTyped* initializer, TVariable* variable)
8148 {
8149 //
8150 // Identifier must be of type constant, a global, or a temporary, and
8151 // starting at version 120, desktop allows uniforms to have initializers.
8152 //
8153 TStorageQualifier qualifier = variable->getType().getQualifier().storage;
8154
8155 //
8156 // If the initializer was from braces { ... }, we convert the whole subtree to a
8157 // constructor-style subtree, allowing the rest of the code to operate
8158 // identically for both kinds of initializers.
8159 //
8160 //
8161 // Type can't be deduced from the initializer list, so a skeletal type to
8162 // follow has to be passed in. Constness and specialization-constness
8163 // should be deduced bottom up, not dictated by the skeletal type.
8164 //
8165 TType skeletalType;
8166 skeletalType.shallowCopy(variable->getType());
8167 skeletalType.getQualifier().makeTemporary();
8168 if (initializer->getAsAggregate() && initializer->getAsAggregate()->getOp() == EOpNull)
8169 initializer = convertInitializerList(loc, skeletalType, initializer, nullptr);
8170 if (initializer == nullptr) {
8171 // error recovery; don't leave const without constant values
8172 if (qualifier == EvqConst)
8173 variable->getWritableType().getQualifier().storage = EvqTemporary;
8174 return nullptr;
8175 }
8176
8177 // Fix outer arrayness if variable is unsized, getting size from the initializer
8178 if (initializer->getType().isSizedArray() && variable->getType().isUnsizedArray())
8179 variable->getWritableType().changeOuterArraySize(initializer->getType().getOuterArraySize());
8180
8181 // Inner arrayness can also get set by an initializer
8182 if (initializer->getType().isArrayOfArrays() && variable->getType().isArrayOfArrays() &&
8183 initializer->getType().getArraySizes()->getNumDims() ==
8184 variable->getType().getArraySizes()->getNumDims()) {
8185 // adopt unsized sizes from the initializer's sizes
8186 for (int d = 1; d < variable->getType().getArraySizes()->getNumDims(); ++d) {
8187 if (variable->getType().getArraySizes()->getDimSize(d) == UnsizedArraySize) {
8188 variable->getWritableType().getArraySizes()->setDimSize(d,
8189 initializer->getType().getArraySizes()->getDimSize(d));
8190 }
8191 }
8192 }
8193
8194 // Uniform and global consts require a constant initializer
8195 if (qualifier == EvqUniform && initializer->getType().getQualifier().storage != EvqConst) {
8196 error(loc, "uniform initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str());
8197 variable->getWritableType().getQualifier().storage = EvqTemporary;
8198 return nullptr;
8199 }
8200
8201 // Const variables require a constant initializer
8202 if (qualifier == EvqConst) {
8203 if (initializer->getType().getQualifier().storage != EvqConst) {
8204 variable->getWritableType().getQualifier().storage = EvqConstReadOnly;
8205 qualifier = EvqConstReadOnly;
8206 }
8207 }
8208
8209 if (qualifier == EvqConst || qualifier == EvqUniform) {
8210 // Compile-time tagging of the variable with its constant value...
8211
8212 initializer = intermediate.addConversion(EOpAssign, variable->getType(), initializer);
8213 if (initializer != nullptr && variable->getType() != initializer->getType())
8214 initializer = intermediate.addUniShapeConversion(EOpAssign, variable->getType(), initializer);
8215 if (initializer == nullptr || !initializer->getAsConstantUnion() ||
8216 variable->getType() != initializer->getType()) {
8217 error(loc, "non-matching or non-convertible constant type for const initializer",
8218 variable->getType().getStorageQualifierString(), "");
8219 variable->getWritableType().getQualifier().storage = EvqTemporary;
8220 return nullptr;
8221 }
8222
8223 variable->setConstArray(initializer->getAsConstantUnion()->getConstArray());
8224 } else {
8225 // normal assigning of a value to a variable...
8226 specializationCheck(loc, initializer->getType(), "initializer");
8227 TIntermSymbol* intermSymbol = intermediate.addSymbol(*variable, loc);
8228 TIntermNode* initNode = handleAssign(loc, EOpAssign, intermSymbol, initializer);
8229 if (initNode == nullptr)
8230 assignError(loc, "=", intermSymbol->getCompleteString(), initializer->getCompleteString());
8231 return initNode;
8232 }
8233
8234 return nullptr;
8235 }
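
// Example (hypothetical HLSL): "static const int a[] = { 1, 2, 3 };" both
// sizes 'a' to int[3] from the initializer's outer array size and tags the
// variable with its constant array via setConstArray(), so nullptr is
// returned and no runtime initialization code is emitted.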
8236
8237 //
8238 // Reprocess any initializer-list { ... } parts of the initializer.
8239 // Need to hierarchically assign correct types and implicit
8240 // conversions. Will do this mimicking the same process used for
8241 // creating a constructor-style initializer, ensuring we get the
8242 // same form.
8243 //
8244 // Returns a node representing an expression for the initializer list expressed
8245 // as the correct type.
8246 //
8247 // Returns nullptr if there is an error.
8248 //
8249 TIntermTyped* HlslParseContext::convertInitializerList(const TSourceLoc& loc, const TType& type,
8250 TIntermTyped* initializer, TIntermTyped* scalarInit)
8251 {
8252 // Will operate recursively. Once a subtree is found that is constructor style,
8253 // everything below it is already good: Only the "top part" of the initializer
8254 // can be an initializer list, where "top part" can extend for several (or all) levels.
8255
8256 // see if we have bottomed out in the tree within the initializer-list part
8257 TIntermAggregate* initList = initializer->getAsAggregate();
8258 if (initList == nullptr || initList->getOp() != EOpNull) {
8259 // We don't have a list, but if it's a scalar and the 'type' is a
8260 // composite, we need to lengthen below to make it useful.
8261 // Otherwise, this is an already formed object to initialize with.
8262 if (type.isScalar() || !initializer->getType().isScalar())
8263 return initializer;
8264 else
8265 initList = intermediate.makeAggregate(initializer);
8266 }
8267
8268 // Of the initializer-list set of nodes, need to process bottom up,
8269 // so recurse deep, then process on the way up.
8270
8271 // Go down the tree here...
8272 if (type.isArray()) {
8273 // The type's array might be unsized, which could be okay, so base sizes on the size of the aggregate.
8274 // Later on, initializer execution code will deal with array size logic.
8275 TType arrayType;
8276 arrayType.shallowCopy(type); // sharing struct stuff is fine
8277 arrayType.copyArraySizes(*type.getArraySizes()); // but get a fresh copy of the array information, to edit below
8278
8279 // edit array sizes to fill in unsized dimensions
8280 if (type.isUnsizedArray())
8281 arrayType.changeOuterArraySize((int)initList->getSequence().size());
8282
8283 // set unsized array dimensions that can be derived from the initializer's first element
8284 if (arrayType.isArrayOfArrays() && initList->getSequence().size() > 0) {
8285 TIntermTyped* firstInit = initList->getSequence()[0]->getAsTyped();
8286 if (firstInit->getType().isArray() &&
8287 arrayType.getArraySizes()->getNumDims() == firstInit->getType().getArraySizes()->getNumDims() + 1) {
8288 for (int d = 1; d < arrayType.getArraySizes()->getNumDims(); ++d) {
8289 if (arrayType.getArraySizes()->getDimSize(d) == UnsizedArraySize)
8290 arrayType.getArraySizes()->setDimSize(d, firstInit->getType().getArraySizes()->getDimSize(d - 1));
8291 }
8292 }
8293 }
8294
8295 // lengthen list to be long enough
8296 lengthenList(loc, initList->getSequence(), arrayType.getOuterArraySize(), scalarInit);
8297
8298 // recursively process each element
8299 TType elementType(arrayType, 0); // dereferenced type
8300 for (int i = 0; i < arrayType.getOuterArraySize(); ++i) {
8301 initList->getSequence()[i] = convertInitializerList(loc, elementType,
8302 initList->getSequence()[i]->getAsTyped(), scalarInit);
8303 if (initList->getSequence()[i] == nullptr)
8304 return nullptr;
8305 }
8306
8307 return addConstructor(loc, initList, arrayType);
8308 } else if (type.isStruct()) {
8309 // do we have implicit assignments to opaques?
8310 for (size_t i = initList->getSequence().size(); i < type.getStruct()->size(); ++i) {
8311 if ((*type.getStruct())[i].type->containsOpaque()) {
8312 error(loc, "cannot implicitly initialize opaque members", "initializer list", "");
8313 return nullptr;
8314 }
8315 }
8316
8317 // lengthen list to be long enough
8318 lengthenList(loc, initList->getSequence(), static_cast<int>(type.getStruct()->size()), scalarInit);
8319
8320 if (type.getStruct()->size() != initList->getSequence().size()) {
8321 error(loc, "wrong number of structure members", "initializer list", "");
8322 return nullptr;
8323 }
8324 for (size_t i = 0; i < type.getStruct()->size(); ++i) {
8325 initList->getSequence()[i] = convertInitializerList(loc, *(*type.getStruct())[i].type,
8326 initList->getSequence()[i]->getAsTyped(), scalarInit);
8327 if (initList->getSequence()[i] == nullptr)
8328 return nullptr;
8329 }
8330 } else if (type.isMatrix()) {
8331 if (type.computeNumComponents() == (int)initList->getSequence().size()) {
8332 // This means the matrix is initialized component-wise, rather than as
8333 // a series of rows and columns. We can just use the list directly as
8334 // a constructor; no further processing needed.
8335 } else {
8336 // lengthen list to be long enough
8337 lengthenList(loc, initList->getSequence(), type.getMatrixCols(), scalarInit);
8338
8339 if (type.getMatrixCols() != (int)initList->getSequence().size()) {
8340 error(loc, "wrong number of matrix columns:", "initializer list", type.getCompleteString().c_str());
8341 return nullptr;
8342 }
8343 TType vectorType(type, 0); // dereferenced type
8344 for (int i = 0; i < type.getMatrixCols(); ++i) {
8345 initList->getSequence()[i] = convertInitializerList(loc, vectorType,
8346 initList->getSequence()[i]->getAsTyped(), scalarInit);
8347 if (initList->getSequence()[i] == nullptr)
8348 return nullptr;
8349 }
8350 }
8351 } else if (type.isVector()) {
8352 // lengthen list to be long enough
8353 lengthenList(loc, initList->getSequence(), type.getVectorSize(), scalarInit);
8354
8355 // error check; we're at bottom, so work is finished below
8356 if (type.getVectorSize() != (int)initList->getSequence().size()) {
8357 error(loc, "wrong vector size (or rows in a matrix column):", "initializer list",
8358 type.getCompleteString().c_str());
8359 return nullptr;
8360 }
8361 } else if (type.isScalar()) {
8362 // lengthen list to be long enough
8363 lengthenList(loc, initList->getSequence(), 1, scalarInit);
8364
8365 if ((int)initList->getSequence().size() != 1) {
8366 error(loc, "scalar expected one element:", "initializer list", type.getCompleteString().c_str());
8367 return nullptr;
8368 }
8369 } else {
8370 error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString().c_str());
8371 return nullptr;
8372 }
8373
8374 // Now that the subtree is processed, process this node as if the
8375 // initializer list is a set of arguments to a constructor.
8376 TIntermTyped* emulatedConstructorArguments;
8377 if (initList->getSequence().size() == 1)
8378 emulatedConstructorArguments = initList->getSequence()[0]->getAsTyped();
8379 else
8380 emulatedConstructorArguments = initList;
8381
8382 return addConstructor(loc, emulatedConstructorArguments, type);
8383 }
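
// Example (hypothetical HLSL): "float2x2 m = { 1, 2, 3, 4 };" reaches the
// matrix case with as many list elements as matrix components, so the flat
// list is passed through as a component-wise constructor, whereas
// "float2x2 m = { { 1, 2 }, { 3, 4 } };" recurses once per column vector
// before constructing the matrix.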
8384
8385 // Lengthen list to be long enough to cover any gap from the current list size
8386 // to 'size'. If the list is longer, do nothing.
8387 // The value to lengthen with is the default for short lists.
8388 //
8389 // By default, lists that are too short due to lack of initializers initialize to zero.
8390 // Alternatively, it could be a scalar initializer for a structure. Both cases are handled,
8391 // based on whether something is passed in as 'scalarInit'.
8392 //
8393 // 'scalarInit' must be safe to use each time this is called (replicating it must not replicate side effects).
8394 //
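// A sketch of the effect, assuming a hypothetical target size of 4:
//     { 1, 2 }                      ->  { 1, 2, 0, 0 }   // scalarInit == nullptr: pad with zeros
//     { 1, 2 } with scalarInit 's'  ->  { 1, 2, s, s }   // scalarInit != nullptr: pad with the scalar
//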
8395 void HlslParseContext::lengthenList(const TSourceLoc& loc, TIntermSequence& list, int size, TIntermTyped* scalarInit)
8396 {
8397 for (int c = (int)list.size(); c < size; ++c) {
8398 if (scalarInit == nullptr)
8399 list.push_back(intermediate.addConstantUnion(0, loc));
8400 else
8401 list.push_back(scalarInit);
8402 }
8403 }
8404
8405 //
8406 // Test for the correctness of the parameters passed to various constructor functions
8407 // and also convert them to the right data type, if allowed and required.
8408 //
8409 // Returns nullptr for an error or the constructed node (aggregate or typed) for no error.
8410 //
8411 TIntermTyped* HlslParseContext::handleConstructor(const TSourceLoc& loc, TIntermTyped* node, const TType& type)
8412 {
8413 if (node == nullptr)
8414 return nullptr;
8415
8416 // Construct identical type
8417 if (type == node->getType())
8418 return node;
8419
8420 // Handle the idiom "(struct type)<scalar value>"
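    // For example (a sketch; 'Data' is a hypothetical user-defined struct):
    //     Data d = (Data)0;          // every member, recursively, is initialized from the scalar
    //     Data e = (Data)someScalar; // likewise, possibly with a side-effecting expression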
8421 if (type.isStruct() && isScalarConstructor(node)) {
8422         // 'node' will almost always get used multiple times, so it should not be used directly:
8423         // that would create a DAG instead of a tree, which might be okay (it would be nice to
8424         // formalize that for constants and symbols), but if 'node' has side effects, they would
8425         // get executed multiple times, which is not okay.
8426 if (node->getAsConstantUnion() == nullptr && node->getAsSymbolNode() == nullptr) {
8427 TIntermAggregate* seq = intermediate.makeAggregate(loc);
8428 TIntermSymbol* copy = makeInternalVariableNode(loc, "scalarCopy", node->getType());
8429 seq = intermediate.growAggregate(seq, intermediate.addBinaryNode(EOpAssign, copy, node, loc));
8430 seq = intermediate.growAggregate(seq, convertInitializerList(loc, type, intermediate.makeAggregate(loc), copy));
8431 seq->setOp(EOpComma);
8432 seq->setType(type);
8433 return seq;
8434 } else
8435 return convertInitializerList(loc, type, intermediate.makeAggregate(loc), node);
8436 }
8437
8438 return addConstructor(loc, node, type);
8439 }
8440
8441 // Add a constructor, either from the grammar, or other programmatic reasons.
8442 //
8443 // 'node' is what to construct from.
8444 // 'type' is what type to construct.
8445 //
8446 // Returns the constructed object.
8447 // Return nullptr if it can't be done.
8448 //
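// For example, both of these HLSL forms arrive here (a sketch with hypothetical values):
//     float4(1.0, 2.0, 3.0, 4.0)   // aggregate of four arguments
//     float4(someFloat)            // single argument, converted/replicated as needed
//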
8449 TIntermTyped* HlslParseContext::addConstructor(const TSourceLoc& loc, TIntermTyped* node, const TType& type)
8450 {
8451 TIntermAggregate* aggrNode = node->getAsAggregate();
8452 TOperator op = intermediate.mapTypeToConstructorOp(type);
8453
8454 if (op == EOpConstructTextureSampler)
8455 return intermediate.setAggregateOperator(aggrNode, op, type, loc);
8456
8457 TTypeList::const_iterator memberTypes;
8458 if (op == EOpConstructStruct)
8459 memberTypes = type.getStruct()->begin();
8460
8461 TType elementType;
8462 if (type.isArray()) {
8463 TType dereferenced(type, 0);
8464 elementType.shallowCopy(dereferenced);
8465 } else
8466 elementType.shallowCopy(type);
8467
8468 bool singleArg;
8469 if (aggrNode != nullptr) {
8470 if (aggrNode->getOp() != EOpNull)
8471 singleArg = true;
8472 else
8473 singleArg = false;
8474 } else
8475 singleArg = true;
8476
8477 TIntermTyped *newNode;
8478 if (singleArg) {
8479 // Handle array -> array conversion
8480 // Constructing an array of one type from an array of another type is allowed,
8481 // assuming there are enough components available (semantic-checked earlier).
8482 if (type.isArray() && node->isArray())
8483 newNode = convertArray(node, type);
8484
8485 // If structure constructor or array constructor is being called
8486 // for only one parameter inside the aggregate, we need to call constructAggregate function once.
8487 else if (type.isArray())
8488 newNode = constructAggregate(node, elementType, 1, node->getLoc());
8489 else if (op == EOpConstructStruct)
8490 newNode = constructAggregate(node, *(*memberTypes).type, 1, node->getLoc());
8491 else {
8492             // shape conversion for a matrix constructor from a scalar. HLSL semantics are: the scalar
8493             // is replicated into every element of the matrix (not just the diagonal), so
8494             // that is handled specially here.
8495 if (type.isMatrix() && node->getType().isScalarOrVec1())
8496 node = intermediate.addShapeConversion(type, node);
8497
8498 newNode = constructBuiltIn(type, op, node, node->getLoc(), false);
8499 }
8500
8501 if (newNode && (type.isArray() || op == EOpConstructStruct))
8502 newNode = intermediate.setAggregateOperator(newNode, EOpConstructStruct, type, loc);
8503
8504 return newNode;
8505 }
8506
8507 //
8508 // Handle list of arguments.
8509 //
8510 TIntermSequence& sequenceVector = aggrNode->getSequence(); // Stores the information about the parameter to the constructor
8511 // if the structure constructor contains more than one parameter, then construct
8512 // each parameter
8513
8514     int paramCount = 0; // keeps track of the constructor parameter number being checked
8515
8516 // for each parameter to the constructor call, check to see if the right type is passed or convert them
8517 // to the right type if possible (and allowed).
8518 // for structure constructors, just check if the right type is passed, no conversion is allowed.
8519
8520 for (TIntermSequence::iterator p = sequenceVector.begin();
8521 p != sequenceVector.end(); p++, paramCount++) {
8522 if (type.isArray())
8523 newNode = constructAggregate(*p, elementType, paramCount + 1, node->getLoc());
8524 else if (op == EOpConstructStruct)
8525 newNode = constructAggregate(*p, *(memberTypes[paramCount]).type, paramCount + 1, node->getLoc());
8526 else
8527 newNode = constructBuiltIn(type, op, (*p)->getAsTyped(), node->getLoc(), true);
8528
8529 if (newNode)
8530 *p = newNode;
8531 else
8532 return nullptr;
8533 }
8534
8535 TIntermTyped* constructor = intermediate.setAggregateOperator(aggrNode, op, type, loc);
8536
8537 return constructor;
8538 }
8539
8540 // Function for constructor implementation. Calls addUnaryMath with the appropriate EOp value
8541 // for the parameter to the constructor (passed to this function). Essentially, it converts
8542 // the parameter types correctly. If a constructor expects an int (like ivec2) and is passed a
8543 // float, the float is converted to an int.
8544 //
8545 // Returns nullptr for an error or the constructed node.
8546 //
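// For example (a sketch): int3(1.5, 2.5, 3.5) converts each float argument with
// EOpConstructInt, and the converted results are then gathered under the int3 constructor op.
//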
8547 TIntermTyped* HlslParseContext::constructBuiltIn(const TType& type, TOperator op, TIntermTyped* node,
8548 const TSourceLoc& loc, bool subset)
8549 {
8550 TIntermTyped* newNode;
8551 TOperator basicOp;
8552
8553 //
8554 // First, convert types as needed.
8555 //
8556 switch (op) {
8557 case EOpConstructF16Vec2:
8558 case EOpConstructF16Vec3:
8559 case EOpConstructF16Vec4:
8560 case EOpConstructF16Mat2x2:
8561 case EOpConstructF16Mat2x3:
8562 case EOpConstructF16Mat2x4:
8563 case EOpConstructF16Mat3x2:
8564 case EOpConstructF16Mat3x3:
8565 case EOpConstructF16Mat3x4:
8566 case EOpConstructF16Mat4x2:
8567 case EOpConstructF16Mat4x3:
8568 case EOpConstructF16Mat4x4:
8569 case EOpConstructFloat16:
8570 basicOp = EOpConstructFloat16;
8571 break;
8572
8573 case EOpConstructVec2:
8574 case EOpConstructVec3:
8575 case EOpConstructVec4:
8576 case EOpConstructMat2x2:
8577 case EOpConstructMat2x3:
8578 case EOpConstructMat2x4:
8579 case EOpConstructMat3x2:
8580 case EOpConstructMat3x3:
8581 case EOpConstructMat3x4:
8582 case EOpConstructMat4x2:
8583 case EOpConstructMat4x3:
8584 case EOpConstructMat4x4:
8585 case EOpConstructFloat:
8586 basicOp = EOpConstructFloat;
8587 break;
8588
8589 case EOpConstructDVec2:
8590 case EOpConstructDVec3:
8591 case EOpConstructDVec4:
8592 case EOpConstructDMat2x2:
8593 case EOpConstructDMat2x3:
8594 case EOpConstructDMat2x4:
8595 case EOpConstructDMat3x2:
8596 case EOpConstructDMat3x3:
8597 case EOpConstructDMat3x4:
8598 case EOpConstructDMat4x2:
8599 case EOpConstructDMat4x3:
8600 case EOpConstructDMat4x4:
8601 case EOpConstructDouble:
8602 basicOp = EOpConstructDouble;
8603 break;
8604
8605 case EOpConstructI16Vec2:
8606 case EOpConstructI16Vec3:
8607 case EOpConstructI16Vec4:
8608 case EOpConstructInt16:
8609 basicOp = EOpConstructInt16;
8610 break;
8611
8612 case EOpConstructIVec2:
8613 case EOpConstructIVec3:
8614 case EOpConstructIVec4:
8615 case EOpConstructIMat2x2:
8616 case EOpConstructIMat2x3:
8617 case EOpConstructIMat2x4:
8618 case EOpConstructIMat3x2:
8619 case EOpConstructIMat3x3:
8620 case EOpConstructIMat3x4:
8621 case EOpConstructIMat4x2:
8622 case EOpConstructIMat4x3:
8623 case EOpConstructIMat4x4:
8624 case EOpConstructInt:
8625 basicOp = EOpConstructInt;
8626 break;
8627
8628 case EOpConstructU16Vec2:
8629 case EOpConstructU16Vec3:
8630 case EOpConstructU16Vec4:
8631 case EOpConstructUint16:
8632 basicOp = EOpConstructUint16;
8633 break;
8634
8635 case EOpConstructUVec2:
8636 case EOpConstructUVec3:
8637 case EOpConstructUVec4:
8638 case EOpConstructUMat2x2:
8639 case EOpConstructUMat2x3:
8640 case EOpConstructUMat2x4:
8641 case EOpConstructUMat3x2:
8642 case EOpConstructUMat3x3:
8643 case EOpConstructUMat3x4:
8644 case EOpConstructUMat4x2:
8645 case EOpConstructUMat4x3:
8646 case EOpConstructUMat4x4:
8647 case EOpConstructUint:
8648 basicOp = EOpConstructUint;
8649 break;
8650
8651 case EOpConstructBVec2:
8652 case EOpConstructBVec3:
8653 case EOpConstructBVec4:
8654 case EOpConstructBMat2x2:
8655 case EOpConstructBMat2x3:
8656 case EOpConstructBMat2x4:
8657 case EOpConstructBMat3x2:
8658 case EOpConstructBMat3x3:
8659 case EOpConstructBMat3x4:
8660 case EOpConstructBMat4x2:
8661 case EOpConstructBMat4x3:
8662 case EOpConstructBMat4x4:
8663 case EOpConstructBool:
8664 basicOp = EOpConstructBool;
8665 break;
8666
8667 default:
8668 error(loc, "unsupported construction", "", "");
8669
8670 return nullptr;
8671 }
8672 newNode = intermediate.addUnaryMath(basicOp, node, node->getLoc());
8673 if (newNode == nullptr) {
8674 error(loc, "can't convert", "constructor", "");
8675 return nullptr;
8676 }
8677
8678 //
8679 // Now, if there still isn't an operation to do the construction, and we need one, add one.
8680 //
8681
8682 // Otherwise, skip out early.
8683 if (subset || (newNode != node && newNode->getType() == type))
8684 return newNode;
8685
8686 // setAggregateOperator will insert a new node for the constructor, as needed.
8687 return intermediate.setAggregateOperator(newNode, op, type, loc);
8688 }
8689
8690 // Convert the array in node to the requested type, which is also an array.
8691 // Returns nullptr on failure, otherwise returns aggregate holding the list of
8692 // elements needed to construct the array.
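// For example (a sketch with hypothetical shapes): constructing 'float2 dst[2]' from
// 'float src[4]' consumes one component of 'src' at a time, building one float2 per
// destination element.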
8693 TIntermTyped* HlslParseContext::convertArray(TIntermTyped* node, const TType& type)
8694 {
8695 assert(node->isArray() && type.isArray());
8696 if (node->getType().computeNumComponents() < type.computeNumComponents())
8697 return nullptr;
8698
8699 // TODO: write an argument replicator, for the case the argument should not be
8700 // executed multiple times, yet multiple copies are needed.
8701
8702 TIntermTyped* constructee = node->getAsTyped();
8703 // track where we are in consuming the argument
8704 int constructeeElement = 0;
8705 int constructeeComponent = 0;
8706
8707 // bump up to the next component to consume
8708 const auto getNextComponent = [&]() {
8709 TIntermTyped* component;
8710 component = handleBracketDereference(node->getLoc(), constructee,
8711 intermediate.addConstantUnion(constructeeElement, node->getLoc()));
8712 if (component->isVector())
8713 component = handleBracketDereference(node->getLoc(), component,
8714 intermediate.addConstantUnion(constructeeComponent, node->getLoc()));
8715 // bump component pointer up
8716 ++constructeeComponent;
8717 if (constructeeComponent == constructee->getVectorSize()) {
8718 constructeeComponent = 0;
8719 ++constructeeElement;
8720 }
8721 return component;
8722 };
8723
8724 // make one subnode per constructed array element
8725 TIntermAggregate* constructor = nullptr;
8726 TType derefType(type, 0);
8727 TType speculativeComponentType(derefType, 0);
8728 TType* componentType = derefType.isVector() ? &speculativeComponentType : &derefType;
8729 TOperator componentOp = intermediate.mapTypeToConstructorOp(*componentType);
8730 TType crossType(node->getBasicType(), EvqTemporary, type.getVectorSize());
8731 for (int e = 0; e < type.getOuterArraySize(); ++e) {
8732 // construct an element
8733 TIntermTyped* elementArg;
8734 if (type.getVectorSize() == constructee->getVectorSize()) {
8735 // same element shape
8736 elementArg = handleBracketDereference(node->getLoc(), constructee,
8737 intermediate.addConstantUnion(e, node->getLoc()));
8738 } else {
8739 // mismatched element shapes
8740 if (type.getVectorSize() == 1)
8741 elementArg = getNextComponent();
8742 else {
8743 // make a vector
8744 TIntermAggregate* elementConstructee = nullptr;
8745 for (int c = 0; c < type.getVectorSize(); ++c)
8746 elementConstructee = intermediate.growAggregate(elementConstructee, getNextComponent());
8747 elementArg = addConstructor(node->getLoc(), elementConstructee, crossType);
8748 }
8749 }
8750 // convert basic types
8751 elementArg = intermediate.addConversion(componentOp, derefType, elementArg);
8752 if (elementArg == nullptr)
8753 return nullptr;
8754 // combine with top-level constructor
8755 constructor = intermediate.growAggregate(constructor, elementArg);
8756 }
8757
8758 return constructor;
8759 }
8760
8761 // This function tests for the type of the parameters to the structure or array constructor. Raises
8762 // an error message if the expected type does not match the parameter passed to the constructor.
8763 //
8764 // Returns nullptr for an error or the input node itself if the expected and the given parameter types match.
8765 //
8766 TIntermTyped* HlslParseContext::constructAggregate(TIntermNode* node, const TType& type, int paramCount,
8767 const TSourceLoc& loc)
8768 {
8769     // Handle the cases that map 1:1 between a constructor argument and the member or element being constructed.
8770 TIntermTyped* converted = intermediate.addConversion(EOpConstructStruct, type, node->getAsTyped());
8771 if (converted == nullptr || converted->getType() != type) {
8772 error(loc, "", "constructor", "cannot convert parameter %d from '%s' to '%s'", paramCount,
8773 node->getAsTyped()->getType().getCompleteString().c_str(), type.getCompleteString().c_str());
8774
8775 return nullptr;
8776 }
8777
8778 return converted;
8779 }
8780
8781 //
8782 // Do everything needed to add an interface block.
8783 //
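// For example (a sketch of a hypothetical HLSL constant buffer that is declared this way):
//     cbuffer PerFrame : register(b0) {
//         float4x4 viewProj;
//         float4   tint;
//     };
//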
8784 void HlslParseContext::declareBlock(const TSourceLoc& loc, TType& type, const TString* instanceName)
8785 {
8786 assert(type.getWritableStruct() != nullptr);
8787
8788 // Clean up top-level decorations that don't belong.
8789 switch (type.getQualifier().storage) {
8790 case EvqUniform:
8791 case EvqBuffer:
8792 correctUniform(type.getQualifier());
8793 break;
8794 case EvqVaryingIn:
8795 correctInput(type.getQualifier());
8796 break;
8797 case EvqVaryingOut:
8798 correctOutput(type.getQualifier());
8799 break;
8800 default:
8801 break;
8802 }
8803
8804 TTypeList& typeList = *type.getWritableStruct();
8805 // fix and check for member storage qualifiers and types that don't belong within a block
8806 for (unsigned int member = 0; member < typeList.size(); ++member) {
8807 TType& memberType = *typeList[member].type;
8808 TQualifier& memberQualifier = memberType.getQualifier();
8809 const TSourceLoc& memberLoc = typeList[member].loc;
8810 globalQualifierFix(memberLoc, memberQualifier);
8811 memberQualifier.storage = type.getQualifier().storage;
8812
8813 if (memberType.isStruct()) {
8814 // clean up and pick up the right set of decorations
8815 auto it = ioTypeMap.find(memberType.getStruct());
8816 switch (type.getQualifier().storage) {
8817 case EvqUniform:
8818 case EvqBuffer:
8819 correctUniform(type.getQualifier());
8820 if (it != ioTypeMap.end() && it->second.uniform)
8821 memberType.setStruct(it->second.uniform);
8822 break;
8823 case EvqVaryingIn:
8824 correctInput(type.getQualifier());
8825 if (it != ioTypeMap.end() && it->second.input)
8826 memberType.setStruct(it->second.input);
8827 break;
8828 case EvqVaryingOut:
8829 correctOutput(type.getQualifier());
8830 if (it != ioTypeMap.end() && it->second.output)
8831 memberType.setStruct(it->second.output);
8832 break;
8833 default:
8834 break;
8835 }
8836 }
8837 }
8838
8839 // Make default block qualification, and adjust the member qualifications
8840
8841 TQualifier defaultQualification;
8842 switch (type.getQualifier().storage) {
8843 case EvqUniform: defaultQualification = globalUniformDefaults; break;
8844 case EvqBuffer: defaultQualification = globalBufferDefaults; break;
8845 case EvqVaryingIn: defaultQualification = globalInputDefaults; break;
8846 case EvqVaryingOut: defaultQualification = globalOutputDefaults; break;
8847 default: defaultQualification.clear(); break;
8848 }
8849
8850 // Special case for "push_constant uniform", which has a default of std430,
8851 // contrary to normal uniform defaults, and can't have a default tracked for it.
8852 if (type.getQualifier().layoutPushConstant && ! type.getQualifier().hasPacking())
8853 type.getQualifier().layoutPacking = ElpStd430;
8854
8855 // fix and check for member layout qualifiers
8856
8857 mergeObjectLayoutQualifiers(defaultQualification, type.getQualifier(), true);
8858
8859 bool memberWithLocation = false;
8860 bool memberWithoutLocation = false;
8861 for (unsigned int member = 0; member < typeList.size(); ++member) {
8862 TQualifier& memberQualifier = typeList[member].type->getQualifier();
8863 const TSourceLoc& memberLoc = typeList[member].loc;
8864 if (memberQualifier.hasStream()) {
8865 if (defaultQualification.layoutStream != memberQualifier.layoutStream)
8866 error(memberLoc, "member cannot contradict block", "stream", "");
8867 }
8868
8869 // "This includes a block's inheritance of the
8870 // current global default buffer, a block member's inheritance of the block's
8871 // buffer, and the requirement that any *xfb_buffer* declared on a block
8872 // member must match the buffer inherited from the block."
8873 if (memberQualifier.hasXfbBuffer()) {
8874 if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer)
8875 error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
8876 }
8877
8878 if (memberQualifier.hasLocation()) {
8879 switch (type.getQualifier().storage) {
8880 case EvqVaryingIn:
8881 case EvqVaryingOut:
8882 memberWithLocation = true;
8883 break;
8884 default:
8885 break;
8886 }
8887 } else
8888 memberWithoutLocation = true;
8889
8890 TQualifier newMemberQualification = defaultQualification;
8891 mergeQualifiers(newMemberQualification, memberQualifier);
8892 memberQualifier = newMemberQualification;
8893 }
8894
8895 // Process the members
8896 fixBlockLocations(loc, type.getQualifier(), typeList, memberWithLocation, memberWithoutLocation);
8897 fixXfbOffsets(type.getQualifier(), typeList);
8898 fixBlockUniformOffsets(type.getQualifier(), typeList);
8899
8900 // reverse merge, so that currentBlockQualifier now has all layout information
8901 // (can't use defaultQualification directly, it's missing other non-layout-default-class qualifiers)
8902 mergeObjectLayoutQualifiers(type.getQualifier(), defaultQualification, true);
8903
8904 //
8905 // Build and add the interface block as a new type named 'blockName'
8906 //
8907
8908 // Use the instance name as the interface name if one exists, else the block name.
8909 const TString& interfaceName = (instanceName && !instanceName->empty()) ? *instanceName : type.getTypeName();
8910
8911 TType blockType(&typeList, interfaceName, type.getQualifier());
8912 if (type.isArray())
8913 blockType.transferArraySizes(type.getArraySizes());
8914
8915 // Add the variable, as anonymous or named instanceName.
8916 // Make an anonymous variable if no name was provided.
8917 if (instanceName == nullptr)
8918 instanceName = NewPoolTString("");
8919
8920 TVariable& variable = *new TVariable(instanceName, blockType);
8921 if (! symbolTable.insert(variable)) {
8922 if (*instanceName == "")
8923 error(loc, "nameless block contains a member that already has a name at global scope",
8924 "" /* blockName->c_str() */, "");
8925 else
8926 error(loc, "block instance name redefinition", variable.getName().c_str(), "");
8927
8928 return;
8929 }
8930
8931 // Save it in the AST for linker use.
8932 if (symbolTable.atGlobalLevel())
8933 trackLinkage(variable);
8934 }
8935
8936 //
8937 // "For a block, this process applies to the entire block, or until the first member
8938 // is reached that has a location layout qualifier. When a block member is declared with a location
8939 // qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level
8940 // declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
8941 // until the next member declared with a location qualifier. The values used for locations do not have to be
8942 // declared in increasing order."
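// For example (a sketch): if a block-level qualifier supplies location 3 and only the third
// member declares an explicit location 7, the members are assigned locations 3, 4, 7, 8, ...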
8943 void HlslParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation)
8944 {
8945 // "If a block has no block-level location layout qualifier, it is required that either all or none of its members
8946 // have a location layout qualifier, or a compile-time error results."
8947 if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation)
8948 error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", "");
8949 else {
8950 if (memberWithLocation) {
8951 // remove any block-level location and make it per *every* member
8952 int nextLocation = 0; // by the rule above, initial value is not relevant
8953 if (qualifier.hasAnyLocation()) {
8954 nextLocation = qualifier.layoutLocation;
8955 qualifier.layoutLocation = TQualifier::layoutLocationEnd;
8956 if (qualifier.hasComponent()) {
8957 // "It is a compile-time error to apply the *component* qualifier to a ... block"
8958 error(loc, "cannot apply to a block", "component", "");
8959 }
8960 if (qualifier.hasIndex()) {
8961 error(loc, "cannot apply to a block", "index", "");
8962 }
8963 }
8964 for (unsigned int member = 0; member < typeList.size(); ++member) {
8965 TQualifier& memberQualifier = typeList[member].type->getQualifier();
8966 const TSourceLoc& memberLoc = typeList[member].loc;
8967 if (! memberQualifier.hasLocation()) {
8968 if (nextLocation >= (int)TQualifier::layoutLocationEnd)
8969 error(memberLoc, "location is too large", "location", "");
8970 memberQualifier.layoutLocation = nextLocation;
8971 memberQualifier.layoutComponent = 0;
8972 }
8973 nextLocation = memberQualifier.layoutLocation +
8974 intermediate.computeTypeLocationSize(*typeList[member].type, language);
8975 }
8976 }
8977 }
8978 }
8979
8980 void HlslParseContext::fixXfbOffsets(TQualifier& qualifier, TTypeList& typeList)
8981 {
8982 // "If a block is qualified with xfb_offset, all its
8983 // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
8984 // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
8985 // offsets."
8986
8987 if (! qualifier.hasXfbBuffer() || ! qualifier.hasXfbOffset())
8988 return;
8989
8990 int nextOffset = qualifier.layoutXfbOffset;
8991 for (unsigned int member = 0; member < typeList.size(); ++member) {
8992 TQualifier& memberQualifier = typeList[member].type->getQualifier();
8993 bool contains64BitType = false;
8994 bool contains32BitType = false;
8995 bool contains16BitType = false;
8996 int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType, contains32BitType, contains16BitType);
8997 // see if we need to auto-assign an offset to this member
8998 if (! memberQualifier.hasXfbOffset()) {
8999 // "if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8"
9000 if (contains64BitType)
9001 RoundToPow2(nextOffset, 8);
9002 else if (contains32BitType)
9003 RoundToPow2(nextOffset, 4);
9004 // "if applied to an aggregate containing a half float or 16-bit integer, the offset must also be a multiple of 2"
9005 else if (contains16BitType)
9006 RoundToPow2(nextOffset, 2);
9007 memberQualifier.layoutXfbOffset = nextOffset;
9008 } else
9009 nextOffset = memberQualifier.layoutXfbOffset;
9010 nextOffset += memberSize;
9011 }
9012
9013 // The above gave all block members an offset, so we can take it off the block now,
9014 // which will avoid double counting the offset usage.
9015 qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd;
9016 }
9017
9018 // Calculate and save the offset of each block member, using the recursively
9019 // defined block offset rules and the user-provided offset and align.
9020 //
9021 // Also, compute and save the total size of the block. For the block's size, arrayness
9022 // is not taken into account, as each element is backed by a separate buffer.
9023 //
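// For example (a sketch, std140): a vec3 member at offset 0 has base alignment 16 and size 12,
// so a following float lands at offset 12, while a following vec4 would be rounded up to 16.
//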
9024 void HlslParseContext::fixBlockUniformOffsets(const TQualifier& qualifier, TTypeList& typeList)
9025 {
9026 if (! qualifier.isUniformOrBuffer())
9027 return;
9028 if (qualifier.layoutPacking != ElpStd140 && qualifier.layoutPacking != ElpStd430 && qualifier.layoutPacking != ElpScalar)
9029 return;
9030
9031 int offset = 0;
9032 int memberSize;
9033 for (unsigned int member = 0; member < typeList.size(); ++member) {
9034 TQualifier& memberQualifier = typeList[member].type->getQualifier();
9035 const TSourceLoc& memberLoc = typeList[member].loc;
9036
9037 // "When align is applied to an array, it effects only the start of the array, not the array's internal stride."
9038
9039 // modify just the children's view of matrix layout, if there is one for this member
9040 TLayoutMatrix subMatrixLayout = typeList[member].type->getQualifier().layoutMatrix;
9041 int dummyStride;
9042 int memberAlignment = intermediate.getMemberAlignment(*typeList[member].type, memberSize, dummyStride,
9043 qualifier.layoutPacking,
9044 subMatrixLayout != ElmNone
9045 ? subMatrixLayout == ElmRowMajor
9046 : qualifier.layoutMatrix == ElmRowMajor);
9047 if (memberQualifier.hasOffset()) {
9048 // "The specified offset must be a multiple
9049 // of the base alignment of the type of the block member it qualifies, or a compile-time error results."
9050 if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment))
9051 error(memberLoc, "must be a multiple of the member's alignment", "offset",
9052 "(layout offset = %d | member alignment = %d)", memberQualifier.layoutOffset, memberAlignment);
9053
9054 // "The offset qualifier forces the qualified member to start at or after the specified
9055 // integral-constant expression, which will be its byte offset from the beginning of the buffer.
9056 // "The actual offset of a member is computed as
9057 // follows: If offset was declared, start with that offset, otherwise start with the next available offset."
9058 offset = std::max(offset, memberQualifier.layoutOffset);
9059 }
9060
9061 // "The actual alignment of a member will be the greater of the specified align alignment and the standard
9062 // (e.g., std140) base alignment for the member's type."
9063 if (memberQualifier.hasAlign())
9064 memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign);
9065
9066 // "If the resulting offset is not a multiple of the actual alignment,
9067 // increase it to the first offset that is a multiple of
9068 // the actual alignment."
9069 RoundToPow2(offset, memberAlignment);
9070 typeList[member].type->getQualifier().layoutOffset = offset;
9071 offset += memberSize;
9072 }
9073 }
9074
9075 // For an identifier that is already declared, add more qualification to it.
9076 void HlslParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
9077 {
9078 TSymbol* symbol = symbolTable.find(identifier);
9079 if (symbol == nullptr) {
9080 error(loc, "identifier not previously declared", identifier.c_str(), "");
9081 return;
9082 }
9083 if (symbol->getAsFunction()) {
9084 error(loc, "cannot re-qualify a function name", identifier.c_str(), "");
9085 return;
9086 }
9087
9088 if (qualifier.isAuxiliary() ||
9089 qualifier.isMemory() ||
9090 qualifier.isInterpolation() ||
9091 qualifier.hasLayout() ||
9092 qualifier.storage != EvqTemporary ||
9093 qualifier.precision != EpqNone) {
9094 error(loc, "cannot add storage, auxiliary, memory, interpolation, layout, or precision qualifier to an existing variable", identifier.c_str(), "");
9095 return;
9096 }
9097
9098 // For read-only built-ins, add a new symbol for holding the modified qualifier.
9099 // This will bring up an entire block, if a block type has to be modified (e.g., gl_Position inside a block)
9100 if (symbol->isReadOnly())
9101 symbol = symbolTable.copyUp(symbol);
9102
9103 if (qualifier.invariant) {
9104 if (intermediate.inIoAccessed(identifier))
9105 error(loc, "cannot change qualification after use", "invariant", "");
9106 symbol->getWritableType().getQualifier().invariant = true;
9107 } else if (qualifier.noContraction) {
9108 if (intermediate.inIoAccessed(identifier))
9109 error(loc, "cannot change qualification after use", "precise", "");
9110 symbol->getWritableType().getQualifier().noContraction = true;
9111 } else if (qualifier.specConstant) {
9112 symbol->getWritableType().getQualifier().makeSpecConstant();
9113 if (qualifier.hasSpecConstantId())
9114 symbol->getWritableType().getQualifier().layoutSpecConstantId = qualifier.layoutSpecConstantId;
9115 } else
9116 warn(loc, "unknown requalification", "", "");
9117 }
9118
9119 void HlslParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, TIdentifierList& identifiers)
9120 {
9121 for (unsigned int i = 0; i < identifiers.size(); ++i)
9122 addQualifierToExisting(loc, qualifier, *identifiers[i]);
9123 }
9124
9125 //
9126 // Update the intermediate for the given input geometry
9127 //
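// For example (a sketch of hypothetical HLSL): a geometry-shader entry point declared as
//     void main(triangle float4 pos[3] : SV_Position, inout TriangleStream<GsOut> stream)
// reaches here with ElgTriangles for the 'triangle' modifier.
//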
9128 bool HlslParseContext::handleInputGeometry(const TSourceLoc& loc, const TLayoutGeometry& geometry)
9129 {
9130 // these can be declared on non-entry-points, in which case they lose their meaning
9131 if (! parsingEntrypointParameters)
9132 return true;
9133
9134 switch (geometry) {
9135 case ElgPoints: // fall through
9136 case ElgLines: // ...
9137 case ElgTriangles: // ...
9138 case ElgLinesAdjacency: // ...
9139 case ElgTrianglesAdjacency: // ...
9140 if (! intermediate.setInputPrimitive(geometry)) {
9141 error(loc, "input primitive geometry redefinition", TQualifier::getGeometryString(geometry), "");
9142 return false;
9143 }
9144 break;
9145
9146 default:
9147 error(loc, "cannot apply to 'in'", TQualifier::getGeometryString(geometry), "");
9148 return false;
9149 }
9150
9151 return true;
9152 }
9153
9154 //
9155 // Update the intermediate for the given output geometry
9156 //
9157 bool HlslParseContext::handleOutputGeometry(const TSourceLoc& loc, const TLayoutGeometry& geometry)
9158 {
9159 // If this is not a geometry shader, ignore. It might be a mixed shader including several stages.
9160 // Since that's an OK situation, return true for success.
9161 if (language != EShLangGeometry)
9162 return true;
9163
9164 // these can be declared on non-entry-points, in which case they lose their meaning
9165 if (! parsingEntrypointParameters)
9166 return true;
9167
9168 switch (geometry) {
9169 case ElgPoints:
9170 case ElgLineStrip:
9171 case ElgTriangleStrip:
9172 if (! intermediate.setOutputPrimitive(geometry)) {
9173 error(loc, "output primitive geometry redefinition", TQualifier::getGeometryString(geometry), "");
9174 return false;
9175 }
9176 break;
9177 default:
9178 error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(geometry), "");
9179 return false;
9180 }
9181
9182 return true;
9183 }
9184
9185 //
9186 // Selection attributes
9187 //
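// For example (a sketch): "[flatten] if (cond) ..." arrives as EatFlatten, and
// "[branch] if (cond) ..." arrives as EatBranch.
//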
9188 void HlslParseContext::handleSelectionAttributes(const TSourceLoc& loc, TIntermSelection* selection,
9189 const TAttributes& attributes)
9190 {
9191 if (selection == nullptr)
9192 return;
9193
9194 for (auto it = attributes.begin(); it != attributes.end(); ++it) {
9195 switch (it->name) {
9196 case EatFlatten:
9197 selection->setFlatten();
9198 break;
9199 case EatBranch:
9200 selection->setDontFlatten();
9201 break;
9202 default:
9203 warn(loc, "attribute does not apply to a selection", "", "");
9204 break;
9205 }
9206 }
9207 }
9208
9209 //
9210 // Switch attributes
9211 //
9212 void HlslParseContext::handleSwitchAttributes(const TSourceLoc& loc, TIntermSwitch* selection,
9213 const TAttributes& attributes)
9214 {
9215 if (selection == nullptr)
9216 return;
9217
9218 for (auto it = attributes.begin(); it != attributes.end(); ++it) {
9219 switch (it->name) {
9220 case EatFlatten:
9221 selection->setFlatten();
9222 break;
9223 case EatBranch:
9224 selection->setDontFlatten();
9225 break;
9226 default:
9227 warn(loc, "attribute does not apply to a switch", "", "");
9228 break;
9229 }
9230 }
9231 }
9232
9233 //
9234 // Loop attributes
9235 //
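// For example (a sketch): "[unroll] for (...)" arrives as EatUnroll, and
// "[loop] while (...)" arrives as EatLoop.
//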
9236 void HlslParseContext::handleLoopAttributes(const TSourceLoc& loc, TIntermLoop* loop,
9237 const TAttributes& attributes)
9238 {
9239 if (loop == nullptr)
9240 return;
9241
9242 for (auto it = attributes.begin(); it != attributes.end(); ++it) {
9243 switch (it->name) {
9244 case EatUnroll:
9245 loop->setUnroll();
9246 break;
9247 case EatLoop:
9248 loop->setDontUnroll();
9249 break;
9250 default:
9251 warn(loc, "attribute does not apply to a loop", "", "");
9252 break;
9253 }
9254 }
9255 }
9256
9257 //
9258 // Updating default qualifier for the case of a declaration with just a qualifier,
9259 // no type, block, or identifier.
9260 //
9261 void HlslParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, const TPublicType& publicType)
9262 {
9263 if (publicType.shaderQualifiers.vertices != TQualifier::layoutNotSet) {
9264 assert(language == EShLangTessControl || language == EShLangGeometry);
9265 // const char* id = (language == EShLangTessControl) ? "vertices" : "max_vertices";
9266 }
9267 if (publicType.shaderQualifiers.invocations != TQualifier::layoutNotSet) {
9268 if (! intermediate.setInvocations(publicType.shaderQualifiers.invocations))
9269 error(loc, "cannot change previously set layout value", "invocations", "");
9270 }
9271 if (publicType.shaderQualifiers.geometry != ElgNone) {
9272 if (publicType.qualifier.storage == EvqVaryingIn) {
9273 switch (publicType.shaderQualifiers.geometry) {
9274 case ElgPoints:
9275 case ElgLines:
9276 case ElgLinesAdjacency:
9277 case ElgTriangles:
9278 case ElgTrianglesAdjacency:
9279 case ElgQuads:
9280 case ElgIsolines:
9281 break;
9282 default:
9283 error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry),
9284 "");
9285 }
9286 } else if (publicType.qualifier.storage == EvqVaryingOut) {
9287 handleOutputGeometry(loc, publicType.shaderQualifiers.geometry);
9288 } else
9289 error(loc, "cannot apply to:", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry),
9290 GetStorageQualifierString(publicType.qualifier.storage));
9291 }
9292 if (publicType.shaderQualifiers.spacing != EvsNone)
9293 intermediate.setVertexSpacing(publicType.shaderQualifiers.spacing);
9294 if (publicType.shaderQualifiers.order != EvoNone)
9295 intermediate.setVertexOrder(publicType.shaderQualifiers.order);
9296 if (publicType.shaderQualifiers.pointMode)
9297 intermediate.setPointMode();
9298 for (int i = 0; i < 3; ++i) {
9299 if (publicType.shaderQualifiers.localSize[i] > 1) {
9300 int max = 0;
9301 switch (i) {
9302 case 0: max = resources.maxComputeWorkGroupSizeX; break;
9303 case 1: max = resources.maxComputeWorkGroupSizeY; break;
9304 case 2: max = resources.maxComputeWorkGroupSizeZ; break;
9305 default: break;
9306 }
9307 if (intermediate.getLocalSize(i) > (unsigned int)max)
9308 error(loc, "too large; see gl_MaxComputeWorkGroupSize", "local_size", "");
9309
9310 // Fix the existing constant gl_WorkGroupSize with this new information.
9311 TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
9312 workGroupSize->getWritableConstArray()[i].setUConst(intermediate.getLocalSize(i));
9313 }
9314 if (publicType.shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) {
9315 intermediate.setLocalSizeSpecId(i, publicType.shaderQualifiers.localSizeSpecId[i]);
9316 // Set the workgroup built-in variable as a specialization constant
9317 TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
9318 workGroupSize->getWritableType().getQualifier().specConstant = true;
9319 }
9320 }
9321 if (publicType.shaderQualifiers.earlyFragmentTests)
9322 intermediate.setEarlyFragmentTests();
9323
9324 const TQualifier& qualifier = publicType.qualifier;
9325
9326 switch (qualifier.storage) {
9327 case EvqUniform:
9328 if (qualifier.hasMatrix())
9329 globalUniformDefaults.layoutMatrix = qualifier.layoutMatrix;
9330 if (qualifier.hasPacking())
9331 globalUniformDefaults.layoutPacking = qualifier.layoutPacking;
9332 break;
9333 case EvqBuffer:
9334 if (qualifier.hasMatrix())
9335 globalBufferDefaults.layoutMatrix = qualifier.layoutMatrix;
9336 if (qualifier.hasPacking())
9337 globalBufferDefaults.layoutPacking = qualifier.layoutPacking;
9338 break;
9339 case EvqVaryingIn:
9340 break;
9341 case EvqVaryingOut:
9342 if (qualifier.hasStream())
9343 globalOutputDefaults.layoutStream = qualifier.layoutStream;
9344 if (qualifier.hasXfbBuffer())
9345 globalOutputDefaults.layoutXfbBuffer = qualifier.layoutXfbBuffer;
9346 if (globalOutputDefaults.hasXfbBuffer() && qualifier.hasXfbStride()) {
9347 if (! intermediate.setXfbBufferStride(globalOutputDefaults.layoutXfbBuffer, qualifier.layoutXfbStride))
9348 error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d",
9349 qualifier.layoutXfbBuffer);
9350 }
9351 break;
9352 default:
9353 error(loc, "default qualifier requires 'uniform', 'buffer', 'in', or 'out' storage qualification", "", "");
9354 return;
9355 }
9356 }
9357
9358 //
9359 // Take the sequence of statements that has been built up since the last case/default,
9360 // put it on the list of top-level nodes for the current (inner-most) switch statement,
9361 // and follow that by the case/default we are on now. (See switch topology comment on
9362 // TIntermSwitch.)
9363 //
9364 void HlslParseContext::wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode)
9365 {
9366 TIntermSequence* switchSequence = switchSequenceStack.back();
9367
9368 if (statements) {
9369 statements->setOperator(EOpSequence);
9370 switchSequence->push_back(statements);
9371 }
9372 if (branchNode) {
9373 // check all previous cases for the same label (or both are 'default')
9374 for (unsigned int s = 0; s < switchSequence->size(); ++s) {
9375 TIntermBranch* prevBranch = (*switchSequence)[s]->getAsBranchNode();
9376 if (prevBranch) {
9377 TIntermTyped* prevExpression = prevBranch->getExpression();
9378 TIntermTyped* newExpression = branchNode->getAsBranchNode()->getExpression();
9379 if (prevExpression == nullptr && newExpression == nullptr)
9380 error(branchNode->getLoc(), "duplicate label", "default", "");
9381 else if (prevExpression != nullptr &&
9382 newExpression != nullptr &&
9383 prevExpression->getAsConstantUnion() &&
9384 newExpression->getAsConstantUnion() &&
9385 prevExpression->getAsConstantUnion()->getConstArray()[0].getIConst() ==
9386 newExpression->getAsConstantUnion()->getConstArray()[0].getIConst())
9387 error(branchNode->getLoc(), "duplicated value", "case", "");
9388 }
9389 }
9390 switchSequence->push_back(branchNode);
9391 }
9392 }
9393
9394 //
9395 // Turn the top-level node sequence built up of wrapupSwitchSubsequence
9396 // into a switch node.
9397 //
9398 TIntermNode* HlslParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expression,
9399 TIntermAggregate* lastStatements, const TAttributes& attributes)
9400 {
9401 wrapupSwitchSubsequence(lastStatements, nullptr);
9402
9403 if (expression == nullptr ||
9404 (expression->getBasicType() != EbtInt && expression->getBasicType() != EbtUint) ||
9405 expression->getType().isArray() || expression->getType().isMatrix() || expression->getType().isVector())
9406 error(loc, "condition must be a scalar integer expression", "switch", "");
9407
9408 // If there is nothing to do, drop the switch but still execute the expression
9409 TIntermSequence* switchSequence = switchSequenceStack.back();
9410 if (switchSequence->size() == 0)
9411 return expression;
9412
9413 if (lastStatements == nullptr) {
9414 // emulate a break for error recovery
9415 lastStatements = intermediate.makeAggregate(intermediate.addBranch(EOpBreak, loc));
9416 lastStatements->setOperator(EOpSequence);
9417 switchSequence->push_back(lastStatements);
9418 }
9419
9420 TIntermAggregate* body = new TIntermAggregate(EOpSequence);
9421 body->getSequence() = *switchSequenceStack.back();
9422 body->setLoc(loc);
9423
9424 TIntermSwitch* switchNode = new TIntermSwitch(expression, body);
9425 switchNode->setLoc(loc);
9426 handleSwitchAttributes(loc, switchNode, attributes);
9427
9428 return switchNode;
9429 }
9430
9431 // Make a new symbol-table level that is made out of the members of a structure.
9432 // This should be done as an anonymous struct (name is "") so that the symbol table
9433 // finds the members with no explicit reference to a 'this' variable.
9434 void HlslParseContext::pushThisScope(const TType& thisStruct, const TVector<TFunctionDeclarator>& functionDeclarators)
9435 {
9436 // member variables
9437 TVariable& thisVariable = *new TVariable(NewPoolTString(""), thisStruct);
9438 symbolTable.pushThis(thisVariable);
9439
9440 // member functions
9441 for (auto it = functionDeclarators.begin(); it != functionDeclarators.end(); ++it) {
9442         // A member has a prefix matching currentTypePrefix.back(), but symbol lookup
9443         // within the class scope will just use the unprefixed name. Hence, there are
9444         // two entries: one fully prefixed and one with no prefix.
9446 TFunction& member = *it->function->clone();
9447 member.removePrefix(currentTypePrefix.back());
9448 symbolTable.insert(member);
9449 }
9450 }
9451
9452 // Track levels of class/struct/namespace nesting with a prefix string using
9453 // the type names separated by the scoping operator. E.g., two levels
9454 // would look like:
9455 //
9456 // outer::inner
9457 //
9458 // The string is empty when at normal global level.
9459 //
9460 void HlslParseContext::pushNamespace(const TString& typeName)
9461 {
9462 // make new type prefix
9463 TString newPrefix;
9464 if (currentTypePrefix.size() > 0)
9465 newPrefix = currentTypePrefix.back();
9466 newPrefix.append(typeName);
9467 newPrefix.append(scopeMangler);
9468 currentTypePrefix.push_back(newPrefix);
9469 }
9470
9471 // Opposite of pushNamespace(), see above
9472 void HlslParseContext::popNamespace()
9473 {
9474 currentTypePrefix.pop_back();
9475 }
9476
9477 // Use the class/struct nesting string to create a global name for
9478 // a member of a class/struct.
9479 void HlslParseContext::getFullNamespaceName(TString*& name) const
9480 {
9481 if (currentTypePrefix.size() == 0)
9482 return;
9483
9484 TString* fullName = NewPoolTString(currentTypePrefix.back().c_str());
9485 fullName->append(*name);
9486 name = fullName;
9487 }
9488
9489 // Helper function to add the namespace scope mangling syntax to a string.
9490 void HlslParseContext::addScopeMangler(TString& name)
9491 {
9492 name.append(scopeMangler);
9493 }
9494
9495 // Return true if this has uniform-interface like decorations.
9496 bool HlslParseContext::hasUniform(const TQualifier& qualifier) const
9497 {
9498 return qualifier.hasUniformLayout() ||
9499 qualifier.layoutPushConstant;
9500 }
9501
9502 // Potentially not the opposite of hasUniform(), as if some characteristic is
9503 // ever used for more than one thing (e.g., uniform or input), hasUniform() should
9504 // say it exists, but clearUniform() should leave it in place.
9505 void HlslParseContext::clearUniform(TQualifier& qualifier)
9506 {
9507 qualifier.clearUniformLayout();
9508 qualifier.layoutPushConstant = false;
9509 }
9510
9511 // Return false if builtIn by itself doesn't force this qualifier to be an input qualifier.
9512 bool HlslParseContext::isInputBuiltIn(const TQualifier& qualifier) const
9513 {
9514 switch (qualifier.builtIn) {
9515 case EbvPosition:
9516 case EbvPointSize:
9517 return language != EShLangVertex && language != EShLangCompute && language != EShLangFragment;
9518 case EbvClipDistance:
9519 case EbvCullDistance:
9520 return language != EShLangVertex && language != EShLangCompute;
9521 case EbvFragCoord:
9522 case EbvFace:
9523 case EbvHelperInvocation:
9524 case EbvLayer:
9525 case EbvPointCoord:
9526 case EbvSampleId:
9527 case EbvSampleMask:
9528 case EbvSamplePosition:
9529 case EbvViewportIndex:
9530 return language == EShLangFragment;
9531 case EbvGlobalInvocationId:
9532 case EbvLocalInvocationIndex:
9533 case EbvLocalInvocationId:
9534 case EbvNumWorkGroups:
9535 case EbvWorkGroupId:
9536 case EbvWorkGroupSize:
9537 return language == EShLangCompute;
9538 case EbvInvocationId:
9539 return language == EShLangTessControl || language == EShLangTessEvaluation || language == EShLangGeometry;
9540 case EbvPatchVertices:
9541 return language == EShLangTessControl || language == EShLangTessEvaluation;
9542 case EbvInstanceId:
9543 case EbvInstanceIndex:
9544 case EbvVertexId:
9545 case EbvVertexIndex:
9546 return language == EShLangVertex;
9547 case EbvPrimitiveId:
9548 return language == EShLangGeometry || language == EShLangFragment || language == EShLangTessControl;
9549 case EbvTessLevelInner:
9550 case EbvTessLevelOuter:
9551 return language == EShLangTessEvaluation;
9552 case EbvTessCoord:
9553 return language == EShLangTessEvaluation;
9554 case EbvViewIndex:
9555 return language != EShLangCompute;
9556 default:
9557 return false;
9558 }
9559 }
9560
9561 // Return true if there are decorations to preserve for input-like storage.
9562 bool HlslParseContext::hasInput(const TQualifier& qualifier) const
9563 {
9564 if (qualifier.hasAnyLocation())
9565 return true;
9566
9567 if (language == EShLangFragment && (qualifier.isInterpolation() || qualifier.centroid || qualifier.sample))
9568 return true;
9569
9570 if (language == EShLangTessEvaluation && qualifier.patch)
9571 return true;
9572
9573 if (isInputBuiltIn(qualifier))
9574 return true;
9575
9576 return false;
9577 }
9578
9579 // Return false if builtIn by itself doesn't force this qualifier to be an output qualifier.
9580 bool HlslParseContext::isOutputBuiltIn(const TQualifier& qualifier) const
9581 {
9582 switch (qualifier.builtIn) {
9583 case EbvPosition:
9584 case EbvPointSize:
9585 case EbvClipVertex:
9586 case EbvClipDistance:
9587 case EbvCullDistance:
9588 return language != EShLangFragment && language != EShLangCompute;
9589 case EbvFragDepth:
9590 case EbvFragDepthGreater:
9591 case EbvFragDepthLesser:
9592 case EbvSampleMask:
9593 return language == EShLangFragment;
9594 case EbvLayer:
9595 case EbvViewportIndex:
9596 return language == EShLangGeometry || language == EShLangVertex;
9597 case EbvPrimitiveId:
9598 return language == EShLangGeometry;
9599 case EbvTessLevelInner:
9600 case EbvTessLevelOuter:
9601 return language == EShLangTessControl;
9602 default:
9603 return false;
9604 }
9605 }
9606
9607 // Return true if there are decorations to preserve for output-like storage.
9608 bool HlslParseContext::hasOutput(const TQualifier& qualifier) const
9609 {
9610 if (qualifier.hasAnyLocation())
9611 return true;
9612
9613 if (language != EShLangFragment && language != EShLangCompute && qualifier.hasXfb())
9614 return true;
9615
9616 if (language == EShLangTessControl && qualifier.patch)
9617 return true;
9618
9619 if (language == EShLangGeometry && qualifier.hasStream())
9620 return true;
9621
9622 if (isOutputBuiltIn(qualifier))
9623 return true;
9624
9625 return false;
9626 }
9627
9628 // Make the IO decorations etc. be appropriate only for an input interface.
9629 void HlslParseContext::correctInput(TQualifier& qualifier)
9630 {
9631 clearUniform(qualifier);
9632 if (language == EShLangVertex)
9633 qualifier.clearInterstage();
9634 if (language != EShLangTessEvaluation)
9635 qualifier.patch = false;
9636 if (language != EShLangFragment) {
9637 qualifier.clearInterpolation();
9638 qualifier.sample = false;
9639 }
9640
9641 qualifier.clearStreamLayout();
9642 qualifier.clearXfbLayout();
9643
9644 if (! isInputBuiltIn(qualifier))
9645 qualifier.builtIn = EbvNone;
9646 }
9647
9648 // Make the IO decorations etc. be appropriate only for an output interface.
9649 void HlslParseContext::correctOutput(TQualifier& qualifier)
9650 {
9651 clearUniform(qualifier);
9652 if (language == EShLangFragment)
9653 qualifier.clearInterstage();
9654 if (language != EShLangGeometry)
9655 qualifier.clearStreamLayout();
9656 if (language == EShLangFragment)
9657 qualifier.clearXfbLayout();
9658 if (language != EShLangTessControl)
9659 qualifier.patch = false;
9660
9661 switch (qualifier.builtIn) {
9662 case EbvFragDepth:
9663 intermediate.setDepthReplacing();
9664 intermediate.setDepth(EldAny);
9665 break;
9666 case EbvFragDepthGreater:
9667 intermediate.setDepthReplacing();
9668 intermediate.setDepth(EldGreater);
9669 qualifier.builtIn = EbvFragDepth;
9670 break;
9671 case EbvFragDepthLesser:
9672 intermediate.setDepthReplacing();
9673 intermediate.setDepth(EldLess);
9674 qualifier.builtIn = EbvFragDepth;
9675 break;
9676 default:
9677 break;
9678 }
9679
9680 if (! isOutputBuiltIn(qualifier))
9681 qualifier.builtIn = EbvNone;
9682 }
9683
9684 // Make the IO decorations etc. be appropriate only for uniform type interfaces.
9685 void HlslParseContext::correctUniform(TQualifier& qualifier)
9686 {
9687 if (qualifier.declaredBuiltIn == EbvNone)
9688 qualifier.declaredBuiltIn = qualifier.builtIn;
9689
9690 qualifier.builtIn = EbvNone;
9691 qualifier.clearInterstage();
9692 qualifier.clearInterstageLayout();
9693 }
9694
9695 // Clear out all IO/Uniform stuff, so this has nothing to do with being an IO interface.
9696 void HlslParseContext::clearUniformInputOutput(TQualifier& qualifier)
9697 {
9698 clearUniform(qualifier);
9699 correctUniform(qualifier);
9700 }
9701
9702
9703 // Set texture return type. Returns success (not all types are valid).
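// For example (a sketch; 'Pair' is a hypothetical struct totaling <= 4 components of one
// basic type):
//     struct Pair { float2 a; float b; };
//     Texture2D<Pair>   t0; // struct return: tracked via structReturnIndex
//     Texture2D<float4> t1; // vector return: tracked via vectorSize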
9704 bool HlslParseContext::setTextureReturnType(TSampler& sampler, const TType& retType, const TSourceLoc& loc)
9705 {
9706 // Seed the output with an invalid index. We will set it to a valid one if we can.
9707 sampler.structReturnIndex = TSampler::noReturnStruct;
9708
9709 // Arrays aren't supported.
9710 if (retType.isArray()) {
9711 error(loc, "Arrays not supported in texture template types", "", "");
9712 return false;
9713 }
9714
9715 // If return type is a vector, remember the vector size in the sampler, and return.
9716 if (retType.isVector() || retType.isScalar()) {
9717 sampler.vectorSize = retType.getVectorSize();
9718 return true;
9719 }
9720
9721 // If it wasn't a vector, it must be a struct meeting certain requirements. The requirements
9722 // are checked below: just check for struct-ness here.
9723 if (!retType.isStruct()) {
9724 error(loc, "Invalid texture template type", "", "");
9725 return false;
9726 }
9727
9728 // TODO: Subpass doesn't handle struct returns, due to some oddities with fn overloading.
9729 if (sampler.isSubpass()) {
9730 error(loc, "Unimplemented: structure template type in subpass input", "", "");
9731 return false;
9732 }
9733
9734 TTypeList* members = retType.getWritableStruct();
9735
9736 // Check for too many or not enough structure members.
9737 if (members->size() > 4 || members->size() == 0) {
9738 error(loc, "Invalid member count in texture template structure", "", "");
9739 return false;
9740 }
9741
9742 // Error checking: We must have <= 4 total components, all of the same basic type.
9743 unsigned totalComponents = 0;
9744 for (unsigned m = 0; m < members->size(); ++m) {
9745 // Check for bad member types
9746 if (!(*members)[m].type->isScalar() && !(*members)[m].type->isVector()) {
9747 error(loc, "Invalid texture template struct member type", "", "");
9748 return false;
9749 }
9750
9751 const unsigned memberVectorSize = (*members)[m].type->getVectorSize();
9752 totalComponents += memberVectorSize;
9753
9754 // too many total member components
9755 if (totalComponents > 4) {
9756 error(loc, "Too many components in texture template structure type", "", "");
9757 return false;
9758 }
9759
9760 // All members must be of a common basic type
9761 if ((*members)[m].type->getBasicType() != (*members)[0].type->getBasicType()) {
9762 error(loc, "Texture template structure members must same basic type", "", "");
9763 return false;
9764 }
9765 }
9766
9767 // If the structure in the return type already exists in the table, we'll use it. Otherwise, we'll make
9768 // a new entry. This is a linear search, but it hardly ever happens, and the list cannot be very large.
9769 for (unsigned int idx = 0; idx < textureReturnStruct.size(); ++idx) {
9770 if (textureReturnStruct[idx] == members) {
9771 sampler.structReturnIndex = idx;
9772 return true;
9773 }
9774 }
9775
9776 // It wasn't found as an existing entry. See if we have room for a new one.
9777 if (textureReturnStruct.size() >= TSampler::structReturnSlots) {
9778 error(loc, "Texture template struct return slots exceeded", "", "");
9779 return false;
9780 }
9781
9782 // Insert it in the vector that tracks struct return types.
9783 sampler.structReturnIndex = unsigned(textureReturnStruct.size());
9784 textureReturnStruct.push_back(members);
9785
9786 // Success!
9787 return true;
9788 }
9789
9790 // Return the sampler return type in retType.
9791 void HlslParseContext::getTextureReturnType(const TSampler& sampler, TType& retType) const
9792 {
9793 if (sampler.hasReturnStruct()) {
9794         assert(sampler.structReturnIndex < textureReturnStruct.size());
9795
9796 // We land here if the texture return is a structure.
9797 TTypeList* blockStruct = textureReturnStruct[sampler.structReturnIndex];
9798
9799 const TType resultType(blockStruct, "");
9800 retType.shallowCopy(resultType);
9801 } else {
9802 // We land here if the texture return is a vector or scalar.
9803 const TType resultType(sampler.type, EvqTemporary, sampler.getVectorSize());
9804 retType.shallowCopy(resultType);
9805 }
9806 }

// Return a symbol for the tessellation linkage variable of the given TBuiltInVariable type
TIntermSymbol* HlslParseContext::findTessLinkageSymbol(TBuiltInVariable biType) const
{
    const auto it = builtInTessLinkageSymbols.find(biType);
    if (it == builtInTessLinkageSymbols.end())  // if it wasn't declared by the user, return nullptr
        return nullptr;

    return intermediate.addSymbol(*it->second->getAsVariable());
}

// Find the patch constant function (issues error, returns nullptr if not found)
const TFunction* HlslParseContext::findPatchConstantFunction(const TSourceLoc& loc)
{
    if (symbolTable.isFunctionNameVariable(patchConstantFunctionName)) {
        error(loc, "can't use variable in patch constant function", patchConstantFunctionName.c_str(), "");
        return nullptr;
    }

    const TString mangledName = patchConstantFunctionName + "(";
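
    // Editorial note: glslang mangles a function name as the name followed by "(" and the
    // parameter type codes, so this prefix matches every overload of the named function
    // (e.g. patchconstantfunc "PCF" yields the prefix "PCF(").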

    // create list of PCF candidates
    TVector<const TFunction*> candidateList;
    bool builtIn;
    symbolTable.findFunctionNameList(mangledName, candidateList, builtIn);

    // We have to have one and only one, or we don't know which to pick: the patchconstantfunc
    // attribute does not allow any disambiguation of overloads.
    if (candidateList.empty()) {
        error(loc, "patch constant function not found", patchConstantFunctionName.c_str(), "");
        return nullptr;
    }

    // Based on directed experiments, it appears that if there are overloaded patchconstantfunctions,
    // HLSL picks the last one in shader source order. Since that isn't yet implemented here, error
    // out if there is more than one candidate.
    if (candidateList.size() > 1) {
        error(loc, "ambiguous patch constant function", patchConstantFunctionName.c_str(), "");
        return nullptr;
    }

    return candidateList[0];
}

// Finalization step: Add patch constant function invocation
void HlslParseContext::addPatchConstantInvocation()
{
    TSourceLoc loc;
    loc.init();

    // If there's no patch constant function, or we're not a HS, do nothing.
    if (patchConstantFunctionName.empty() || language != EShLangTessControl)
        return;

    // Look for built-in variables in a function's parameter list.
    const auto findBuiltIns = [&](const TFunction& function, std::set<tInterstageIoData>& builtIns) {
        for (int p = 0; p < function.getParamCount(); ++p) {
            TStorageQualifier storage = function[p].type->getQualifier().storage;

            if (storage == EvqConstReadOnly) // treated identically to input
                storage = EvqIn;

            if (function[p].getDeclaredBuiltIn() != EbvNone)
                builtIns.insert(HlslParseContext::tInterstageIoData(function[p].getDeclaredBuiltIn(), storage));
            else
                builtIns.insert(HlslParseContext::tInterstageIoData(function[p].type->getQualifier().builtIn, storage));
        }
    };

    // If we synthesize a built-in interface variable, we must add it to the linkage.
    const auto addToLinkage = [&](const TType& type, const TString* name, TIntermSymbol** symbolNode) {
        if (name == nullptr) {
            error(loc, "unable to locate patch function parameter name", "", "");
            return;
        } else {
            TVariable& variable = *new TVariable(name, type);
            if (! symbolTable.insert(variable)) {
                error(loc, "unable to declare patch constant function interface variable", name->c_str(), "");
                return;
            }

            globalQualifierFix(loc, variable.getWritableType().getQualifier());

            if (symbolNode != nullptr)
                *symbolNode = intermediate.addSymbol(variable);

            trackLinkage(variable);
        }
    };

    const auto isOutputPatch = [](TFunction& patchConstantFunction, int param) {
        const TType& type = *patchConstantFunction[param].type;
        const TBuiltInVariable biType = patchConstantFunction[param].getDeclaredBuiltIn();

        return type.isSizedArray() && biType == EbvOutputPatch;
    };

    // We will perform these steps. Each is in a scoped block for separation: they could
    // become separate functions to make addPatchConstantInvocation shorter.
    //
    // 1. Union the interfaces, and create built-ins for anything present in the PCF and
    //    declared as a built-in variable that isn't present in the entry point's signature.
    //
    // 2. Synthesize a call to the patchconstantfunction using built-in variables from either main,
    //    or the ones we created. Matching is based on built-in type. We may use synthesized
    //    variables from (1) above.
    //
    // 2B. Synthesize per control point invocations of the wrapped entry point if the PCF requires them.
    //
    // 3. Create a return sequence: copy the return value (if any) from the PCF to a
    //    (non-sanitized) output variable. In case this may involve multiple copies, such as for
    //    an arrayed variable, a temporary copy of the PCF output is created to avoid multiple
    //    indirections into a complex R-value coming from the call to the PCF.
    //
    // 4. Create a barrier.
    //
    // 5/5B. Call the PCF inside an if test for (invocation id == 0).
    // An illustrative shader arrangement is sketched just below.
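    //
    // Illustrative sketch (hypothetical HLSL; VSOut/HSOut/PCF are made-up names, and the
    // other required hull-shader attributes are omitted) of the arrangement this pass
    // expects:
    //
    //     struct PatchConsts { float edges[3] : SV_TessFactor;
    //                          float inner    : SV_InsideTessFactor; };
    //     PatchConsts PCF(InputPatch<VSOut, 3> ip, uint pid : SV_PrimitiveID) { ... }
    //
    //     [patchconstantfunc("PCF")]
    //     HSOut main(InputPatch<VSOut, 3> ip, uint cpid : SV_OutputControlPointID) { ... }
    //
    // The synthesized epilogue calls PCF once per patch, guarded by invocation 0.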

    TFunction* patchConstantFunctionPtr = const_cast<TFunction*>(findPatchConstantFunction(loc));

    if (patchConstantFunctionPtr == nullptr)
        return;

    TFunction& patchConstantFunction = *patchConstantFunctionPtr;

    const int pcfParamCount = patchConstantFunction.getParamCount();
    TIntermSymbol* invocationIdSym = findTessLinkageSymbol(EbvInvocationId);
    TIntermSequence& epBodySeq = entryPointFunctionBody->getAsAggregate()->getSequence();

    int outPatchParam = -1; // -1 means there isn't one.

    // ================ Step 1A: Union Interfaces ================
    // Our patch constant function.
    {
        std::set<tInterstageIoData> pcfBuiltIns; // patch constant function built-ins
        std::set<tInterstageIoData> epfBuiltIns; // entry point function built-ins

        assert(entryPointFunction);
        assert(entryPointFunctionBody);

        findBuiltIns(patchConstantFunction, pcfBuiltIns);
        findBuiltIns(*entryPointFunction, epfBuiltIns);

        // Find the set of built-ins in the PCF that are not present in the entry point.
        std::set<tInterstageIoData> notInEntryPoint = pcfBuiltIns;

        // std::set_difference is not usable on unordered containers
        for (auto bi = epfBuiltIns.begin(); bi != epfBuiltIns.end(); ++bi)
            notInEntryPoint.erase(*bi);

        // Now we'll add those to the entry point and to the linkage.
        for (int p = 0; p < pcfParamCount; ++p) {
            const TBuiltInVariable biType = patchConstantFunction[p].getDeclaredBuiltIn();
            TStorageQualifier storage = patchConstantFunction[p].type->getQualifier().storage;

            // Track whether there is an output patch param
            if (isOutputPatch(patchConstantFunction, p)) {
                if (outPatchParam >= 0) {
                    // Presently we only support one per ctrl pt input.
                    error(loc, "unimplemented: multiple output patches in patch constant function", "", "");
                    return;
                }
                outPatchParam = p;
            }

            if (biType != EbvNone) {
                TType* paramType = patchConstantFunction[p].type->clone();

                if (storage == EvqConstReadOnly) // treated identically to input
                    storage = EvqIn;

                // Presently, the only non-built-in we support is InputPatch, which is treated as
                // a pseudo-built-in.
                if (biType == EbvInputPatch) {
                    builtInTessLinkageSymbols[biType] = inputPatch;
                } else if (biType == EbvOutputPatch) {
                    // Nothing...
                } else {
                    // Use the original declaration type for the linkage
                    paramType->getQualifier().builtIn = biType;
                    if (biType == EbvTessLevelInner || biType == EbvTessLevelOuter)
                        paramType->getQualifier().patch = true;

                    if (notInEntryPoint.count(tInterstageIoData(biType, storage)) == 1)
                        addToLinkage(*paramType, patchConstantFunction[p].name, nullptr);
                }
            }
        }

        // If we didn't find an invocation ID, because the shader never declared one, add our own.
        if (invocationIdSym == nullptr) {
            TType invocationIdType(EbtUint, EvqIn, 1);
            TString* invocationIdName = NewPoolTString("InvocationId");
            invocationIdType.getQualifier().builtIn = EbvInvocationId;
            addToLinkage(invocationIdType, invocationIdName, &invocationIdSym);
        }

        assert(invocationIdSym);
    }

    TIntermTyped* pcfArguments = nullptr;
    TVariable* perCtrlPtVar = nullptr;

    // ================ Step 1B: Argument synthesis ================
    // Create pcfArguments for synthesis of patchconstantfunction invocation
    {
        for (int p = 0; p < pcfParamCount; ++p) {
            TIntermTyped* inputArg = nullptr;

            if (p == outPatchParam) {
                if (perCtrlPtVar == nullptr) {
                    perCtrlPtVar = makeInternalVariable(*patchConstantFunction[outPatchParam].name,
                                                        *patchConstantFunction[outPatchParam].type);

                    perCtrlPtVar->getWritableType().getQualifier().makeTemporary();
                }
                inputArg = intermediate.addSymbol(*perCtrlPtVar, loc);
            } else {
                // find which built-in it is
                const TBuiltInVariable biType = patchConstantFunction[p].getDeclaredBuiltIn();

                if (biType == EbvInputPatch && inputPatch == nullptr) {
                    error(loc, "unimplemented: PCF input patch without entry point input patch parameter", "", "");
                    return;
                }

                inputArg = findTessLinkageSymbol(biType);

                if (inputArg == nullptr) {
                    error(loc, "unable to find patch constant function built-in variable", "", "");
                    return;
                }
            }

            if (pcfParamCount == 1)
                pcfArguments = inputArg;
            else
                pcfArguments = intermediate.growAggregate(pcfArguments, inputArg);
        }
    }

    // ================ Step 2: Synthesize call to PCF ================
    TIntermAggregate* pcfCallSequence = nullptr;
    TIntermTyped* pcfCall = nullptr;

    {
        // Create a function call to the patchconstantfunction
        if (pcfArguments)
            addInputArgumentConversions(patchConstantFunction, pcfArguments);

        // Synthetic call.
        pcfCall = intermediate.setAggregateOperator(pcfArguments, EOpFunctionCall, patchConstantFunction.getType(), loc);
        pcfCall->getAsAggregate()->setUserDefined();
        pcfCall->getAsAggregate()->setName(patchConstantFunction.getMangledName());
        intermediate.addToCallGraph(infoSink, intermediate.getEntryPointMangledName().c_str(),
                                    patchConstantFunction.getMangledName());

        if (pcfCall->getAsAggregate()) {
            TQualifierList& qualifierList = pcfCall->getAsAggregate()->getQualifierList();
            for (int i = 0; i < patchConstantFunction.getParamCount(); ++i) {
                TStorageQualifier qual = patchConstantFunction[i].type->getQualifier().storage;
                qualifierList.push_back(qual);
            }
            pcfCall = addOutputArgumentConversions(patchConstantFunction, *pcfCall->getAsOperator());
        }
    }

    // ================ Step 2B: Per Control Point synthesis ================
    // If there is per control point data, we must either emulate that with multiple
    // invocations of the entry point to build up an array, or (TODO:) use a not yet
    // available extension to look across the SIMD lanes. The former is implemented here
    // as a placeholder for the latter, as sketched below.
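    //
    // Conceptually, the loop below synthesizes (editorial sketch; 'main' is the wrapped
    // entry point and 'perCtrlPtVar' the temporary array handed to the PCF):
    //
    //     for (cpt = 0; cpt < arraySize; ++cpt)
    //         perCtrlPtVar[cpt] = main(..., /* InvocationId = */ cpt, ...);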
    if (outPatchParam >= 0) {
        // We must introduce a local temp variable of the type wanted by the PCF input.
        const int arraySize = patchConstantFunction[outPatchParam].type->getOuterArraySize();

        if (entryPointFunction->getType().getBasicType() == EbtVoid) {
            error(loc, "entry point must return a value for use with patch constant function", "", "");
            return;
        }

        // Create calls to the wrapped main to fill in the array. We will substitute fixed values
        // of the invocation ID when calling the wrapped main.

        // This is the type of each member of the per ctrl point array.
        const TType derefType(perCtrlPtVar->getType(), 0);

        for (int cpt = 0; cpt < arraySize; ++cpt) {
            // TODO: improve. substr(1) here is to avoid the '@' that was grafted on but isn't in the symtab
            // for this function.
            const TString origName = entryPointFunction->getName().substr(1);
            TFunction callee(&origName, TType(EbtVoid));
            TIntermTyped* callingArgs = nullptr;

            for (int i = 0; i < entryPointFunction->getParamCount(); i++) {
                TParameter& param = (*entryPointFunction)[i];
                TType& paramType = *param.type;

                if (paramType.getQualifier().isParamOutput()) {
                    error(loc, "unimplemented: entry point outputs in patch constant function invocation", "", "");
                    return;
                }

                if (paramType.getQualifier().isParamInput()) {
                    TIntermTyped* arg = nullptr;
                    if ((*entryPointFunction)[i].getDeclaredBuiltIn() == EbvInvocationId) {
                        // substitute invocation ID with the array element ID
                        arg = intermediate.addConstantUnion(cpt, loc);
                    } else {
                        TVariable* argVar = makeInternalVariable(*param.name, *param.type);
                        argVar->getWritableType().getQualifier().makeTemporary();
                        arg = intermediate.addSymbol(*argVar);
                    }

                    handleFunctionArgument(&callee, callingArgs, arg);
                }
            }

            // Call and assign to per ctrl point variable
            currentCaller = intermediate.getEntryPointMangledName().c_str();
            TIntermTyped* callReturn = handleFunctionCall(loc, &callee, callingArgs);
            TIntermTyped* index = intermediate.addConstantUnion(cpt, loc);
            TIntermSymbol* perCtrlPtSym = intermediate.addSymbol(*perCtrlPtVar, loc);
            TIntermTyped* element = intermediate.addIndex(EOpIndexDirect, perCtrlPtSym, index, loc);
            element->setType(derefType);
            element->setLoc(loc);

            pcfCallSequence = intermediate.growAggregate(pcfCallSequence,
                                                         handleAssign(loc, EOpAssign, element, callReturn));
        }
    }

    // ================ Step 3: Create return sequence ================
    // Return sequence: copy the PCF result to a temporary, then to the shader output variable,
    // as sketched below.
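    //
    // In effect (editorial sketch of the synthesized statements, using the internal
    // variable names created just below):
    //
    //     @patchConstantResult = PCF(args...);          // cache the complex R-value once
    //     @patchConstantOutput = @patchConstantResult;  // then copy to the patch output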
    if (pcfCall->getBasicType() != EbtVoid) {
        const TType* retType = &patchConstantFunction.getType(); // return type from the PCF
        TType outType; // output type that goes with the return type.
        outType.shallowCopy(*retType);

        // substitute the output type
        const auto newLists = ioTypeMap.find(retType->getStruct());
        if (newLists != ioTypeMap.end())
            outType.setStruct(newLists->second.output);

        // Substitute the top level type's built-in type
        if (patchConstantFunction.getDeclaredBuiltInType() != EbvNone)
            outType.getQualifier().builtIn = patchConstantFunction.getDeclaredBuiltInType();

        outType.getQualifier().patch = true; // make it a per-patch variable

        TVariable* pcfOutput = makeInternalVariable("@patchConstantOutput", outType);
        pcfOutput->getWritableType().getQualifier().storage = EvqVaryingOut;

        if (pcfOutput->getType().isStruct())
            flatten(*pcfOutput, false);

        assignToInterface(*pcfOutput);

        TIntermSymbol* pcfOutputSym = intermediate.addSymbol(*pcfOutput, loc);

        // The call to the PCF is a complex R-value: we want to store it in a temp to avoid
        // repeated calls to the PCF:
        TVariable* pcfCallResult = makeInternalVariable("@patchConstantResult", *retType);
        pcfCallResult->getWritableType().getQualifier().makeTemporary();

        TIntermSymbol* pcfResultVar = intermediate.addSymbol(*pcfCallResult, loc);
        TIntermNode* pcfResultAssign = handleAssign(loc, EOpAssign, pcfResultVar, pcfCall);
        TIntermNode* pcfResultToOut = handleAssign(loc, EOpAssign, pcfOutputSym,
                                                   intermediate.addSymbol(*pcfCallResult, loc));

        pcfCallSequence = intermediate.growAggregate(pcfCallSequence, pcfResultAssign);
        pcfCallSequence = intermediate.growAggregate(pcfCallSequence, pcfResultToOut);
    } else {
        pcfCallSequence = intermediate.growAggregate(pcfCallSequence, pcfCall);
    }

    // ================ Step 4: Barrier ================
    TIntermTyped* barrier = new TIntermAggregate(EOpBarrier);
    barrier->setLoc(loc);
    barrier->setType(TType(EbtVoid));
    epBodySeq.insert(epBodySeq.end(), barrier);

    // ================ Step 5: Test on invocation ID ================
    TIntermTyped* zero = intermediate.addConstantUnion(0, loc, true);
    TIntermTyped* cmp = intermediate.addBinaryNode(EOpEqual, invocationIdSym, zero, loc, TType(EbtBool));

    // ================ Step 5B: Create if statement on Invocation ID == 0 ================
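    // i.e., the epilogue appended here corresponds to (editorial sketch):
    //
    //     barrier();
    //     if (InvocationId == 0) { <pcfCallSequence> }
    //
    // so the PCF runs once per patch rather than once per output control point.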
    intermediate.setAggregateOperator(pcfCallSequence, EOpSequence, TType(EbtVoid), loc);
    TIntermTyped* invocationIdTest = new TIntermSelection(cmp, pcfCallSequence, nullptr);
    invocationIdTest->setLoc(loc);

    // add our test sequence before the return.
    epBodySeq.insert(epBodySeq.end(), invocationIdTest);
}

// Finalization step: remove unused buffer blocks from linkage (we don't know until the
// shader is entirely compiled).
// Preserve the order of the remaining symbols.
void HlslParseContext::removeUnusedStructBufferCounters()
{
    const auto endIt = std::remove_if(linkageSymbols.begin(), linkageSymbols.end(),
                                      [this](const TSymbol* sym) {
                                          const auto sbcIt = structBufferCounter.find(sym->getName());
                                          return sbcIt != structBufferCounter.end() && !sbcIt->second;
                                      });

    linkageSymbols.erase(endIt, linkageSymbols.end());
}

// Finalization step: patch texture shadow modes to match the samplers they were combined with
void HlslParseContext::fixTextureShadowModes()
{
    for (auto symbol = linkageSymbols.begin(); symbol != linkageSymbols.end(); ++symbol) {
        TSampler& sampler = (*symbol)->getWritableType().getSampler();

        if (sampler.isTexture()) {
            const auto shadowMode = textureShadowVariant.find((*symbol)->getUniqueId());
            if (shadowMode != textureShadowVariant.end()) {

                if (shadowMode->second->overloaded())
                    // Texture needs legalization if it's been seen with both shadow and non-shadow modes.
                    intermediate.setNeedsLegalization();

                sampler.shadow = shadowMode->second->isShadowId((*symbol)->getUniqueId());
            }
        }
    }
}

// Finalization step: patch Append methods to use the proper stream output, which isn't known until
// main is parsed, which could happen after the Append method is parsed.
void HlslParseContext::finalizeAppendMethods()
{
    TSourceLoc loc;
    loc.init();

    // Nothing to do; this also bypasses the stream output validity test below.
    if (gsAppends.empty())
        return;

    if (gsStreamOutput == nullptr) {
        error(loc, "unable to find output symbol for Append()", "", "");
        return;
    }

    // Patch the append sequences, now that we know the stream output symbol.
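    // Each Append(data) call was parsed into a small sequence whose first element holds the
    // data expression; rewrite that element into an assignment of the data to the stream
    // output symbol (editorial sketch):  data  -->  gsStreamOutput = data.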
    for (auto append = gsAppends.begin(); append != gsAppends.end(); ++append) {
        append->node->getSequence()[0] =
            handleAssign(append->loc, EOpAssign,
                         intermediate.addSymbol(*gsStreamOutput, append->loc),
                         append->node->getSequence()[0]->getAsTyped());
    }
}

// post-processing
void HlslParseContext::finish()
{
    // Error check: flag any dangling .mips operator. These are not nested constructs in the
    // grammar, so they cannot be detected there. This is not strictly needed in a non-validating
    // parser; it's just helpful.
    if (! mipsOperatorMipArg.empty()) {
        error(mipsOperatorMipArg.back().loc, "unterminated mips operator:", "", "");
    }

    removeUnusedStructBufferCounters();
    addPatchConstantInvocation();
    fixTextureShadowModes();
    finalizeAppendMethods();

    // Communicate out (esp. for the command line) that we formed an AST that will make
    // illegal SPIR-V, and that it needs transforms to legalize it.
    if (intermediate.needsLegalization() && (messages & EShMsgHlslLegalization))
        infoSink.info << "WARNING: AST will form illegal SPIR-V; need to transform to legalize";

    TParseContextBase::finish();
}

} // end namespace glslang