//
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

//
// Do link-time merging and validation of intermediate representations.
//
// Basic model is that during compilation, each compilation unit (shader) is
// compiled into one TIntermediate instance. Then, at link time, multiple
// units for the same stage can be merged together, which can generate errors.
// Then, after all merging, a single instance of TIntermediate represents
// the whole stage. A final error check can be done on the resulting stage,
// even if no merging was done (i.e., the stage was only one compilation unit).
//

#include "localintermediate.h"
#include "../Include/InfoSink.h"

namespace glslang {

//
// Link-time error emitter.
//
void TIntermediate::error(TInfoSink& infoSink, const char* message)
{
    infoSink.info.prefix(EPrefixError);
    infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";

    ++numErrors;
}

// Link-time warning.
void TIntermediate::warn(TInfoSink& infoSink, const char* message)
{
    infoSink.info.prefix(EPrefixWarning);
    infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
}

// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
// name must have the exact same set of members qualified with offset and their integral-constant
// expression values must be the same, or a link-time error results."

//
// Merge the information from 'unit' into 'this'
//
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
{
    mergeCallGraphs(infoSink, unit);
    mergeModes(infoSink, unit);
    mergeTrees(infoSink, unit);
}

void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
{
    if (unit.getNumEntryPoints() > 0) {
        if (getNumEntryPoints() > 0)
            error(infoSink, "can't handle multiple entry points per stage");
        else {
            entryPointName = unit.getEntryPointName();
            entryPointMangledName = unit.getEntryPointMangledName();
        }
    }
    numEntryPoints += unit.getNumEntryPoints();

    callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
}

#define MERGE_MAX(member) member = std::max(member, unit.member)
#define MERGE_TRUE(member) if (unit.member) member = unit.member;
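// For example, MERGE_MAX(spvVersion.spv) expands to
//     spvVersion.spv = std::max(spvVersion.spv, unit.spvVersion.spv);
// while MERGE_TRUE(xfbMode) copies unit.xfbMode only when it is true, so a
// feature turned on by any one compilation unit stays on in the merged result.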

void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
{
    if (language != unit.language)
        error(infoSink, "stages must match when linking into a single stage");

    if (source == EShSourceNone)
        source = unit.source;
    if (source != unit.source)
        error(infoSink, "can't link compilation units from different source languages");

    if (treeRoot == nullptr) {
        profile = unit.profile;
        version = unit.version;
        requestedExtensions = unit.requestedExtensions;
    } else {
        if ((profile == EEsProfile) != (unit.profile == EEsProfile))
            error(infoSink, "Cannot cross link ES and desktop profiles");
        else if (unit.profile == ECompatibilityProfile)
            profile = ECompatibilityProfile;
        version = std::max(version, unit.version);
        requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
    }

    MERGE_MAX(spvVersion.spv);
    MERGE_MAX(spvVersion.vulkanGlsl);
    MERGE_MAX(spvVersion.vulkan);
    MERGE_MAX(spvVersion.openGl);

    numErrors += unit.getNumErrors();
    numPushConstants += unit.numPushConstants;

    if (unit.invocations != TQualifier::layoutNotSet) {
        if (invocations == TQualifier::layoutNotSet)
            invocations = unit.invocations;
        else if (invocations != unit.invocations)
            error(infoSink, "number of invocations must match between compilation units");
    }

    if (vertices == TQualifier::layoutNotSet)
        vertices = unit.vertices;
    else if (vertices != unit.vertices) {
        if (language == EShLangGeometry
#ifdef NV_EXTENSIONS
            || language == EShLangMeshNV
#endif
            )
            error(infoSink, "Contradictory layout max_vertices values");
        else if (language == EShLangTessControl)
            error(infoSink, "Contradictory layout vertices values");
        else
            assert(0);
    }
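    // For illustration (hypothetical shaders, not part of this file): linking
    // two geometry-stage units declaring
    //     layout(max_vertices = 4) out;    // unit A
    //     layout(max_vertices = 8) out;    // unit B
    // lands in the error above, since both set 'vertices' but to different values.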
#ifdef NV_EXTENSIONS
    if (primitives == TQualifier::layoutNotSet)
        primitives = unit.primitives;
    else if (primitives != unit.primitives) {
        if (language == EShLangMeshNV)
            error(infoSink, "Contradictory layout max_primitives values");
        else
            assert(0);
    }
#endif

    if (inputPrimitive == ElgNone)
        inputPrimitive = unit.inputPrimitive;
    else if (inputPrimitive != unit.inputPrimitive)
        error(infoSink, "Contradictory input layout primitives");

    if (outputPrimitive == ElgNone)
        outputPrimitive = unit.outputPrimitive;
    else if (outputPrimitive != unit.outputPrimitive)
        error(infoSink, "Contradictory output layout primitives");

    if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
        error(infoSink, "gl_FragCoord redeclarations must match across shaders");

    if (vertexSpacing == EvsNone)
        vertexSpacing = unit.vertexSpacing;
    else if (vertexSpacing != unit.vertexSpacing)
        error(infoSink, "Contradictory input vertex spacing");

    if (vertexOrder == EvoNone)
        vertexOrder = unit.vertexOrder;
    else if (vertexOrder != unit.vertexOrder)
        error(infoSink, "Contradictory triangle ordering");

    MERGE_TRUE(pointMode);

    for (int i = 0; i < 3; ++i) {
        // Adopt the other unit's size while this one is still at its default;
        // otherwise the two units must agree.
        if (localSize[i] == 1)
            localSize[i] = unit.localSize[i];
        else if (localSize[i] != unit.localSize[i])
            error(infoSink, "Contradictory local size");

        if (localSizeSpecId[i] == TQualifier::layoutNotSet)
            localSizeSpecId[i] = unit.localSizeSpecId[i];
        else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
            error(infoSink, "Contradictory local size specialization ids");
    }
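
    // For example (hypothetical units): one compute unit with
    //     layout(local_size_x = 64) in;
    // merged with another declaring local_size_x = 128 reports
    // "Contradictory local size", while a unit that left the size at its
    // default (1) simply adopts the other's value.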

    MERGE_TRUE(earlyFragmentTests);
    MERGE_TRUE(postDepthCoverage);

    if (depthLayout == EldNone)
        depthLayout = unit.depthLayout;
    else if (depthLayout != unit.depthLayout)
        error(infoSink, "Contradictory depth layouts");

    MERGE_TRUE(depthReplacing);
    MERGE_TRUE(hlslFunctionality1);

    blendEquations |= unit.blendEquations;

    MERGE_TRUE(xfbMode);

    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
        else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
            error(infoSink, "Contradictory xfb_stride");
        xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
        if (unit.xfbBuffers[b].containsDouble)
            xfbBuffers[b].containsDouble = true;
        // TODO: 4.4 link: enhanced layouts: compare ranges
    }

    MERGE_TRUE(multiStream);

#ifdef NV_EXTENSIONS
    MERGE_TRUE(layoutOverrideCoverage);
    MERGE_TRUE(geoPassthroughEXT);
#endif

    for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
        if (unit.shiftBinding[i] > 0)
            setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
    }

    for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
        for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
            setShiftBindingForSet((TResourceType)i, it->second, it->first);
    }

    resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());

    MERGE_TRUE(autoMapBindings);
    MERGE_TRUE(autoMapLocations);
    MERGE_TRUE(invertY);
    MERGE_TRUE(flattenUniformArrays);
    MERGE_TRUE(useUnknownFormat);
    MERGE_TRUE(hlslOffsets);
    MERGE_TRUE(useStorageBuffer);
    MERGE_TRUE(hlslIoMapping);

    // TODO: sourceFile
    // TODO: sourceText
    // TODO: processes

    MERGE_TRUE(needToLegalize);
    MERGE_TRUE(binaryDoubleOutput);
    MERGE_TRUE(usePhysicalStorageBuffer);
}

//
// Merge the 'unit' AST into 'this' AST.
// That includes rationalizing the unique IDs, which were set up independently,
// and might have overlaps that are not the same symbol, or might have different
// IDs for what should be the same shared symbol.
//
void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
{
    if (unit.treeRoot == nullptr)
        return;

    if (treeRoot == nullptr) {
        treeRoot = unit.treeRoot;
        return;
    }

    // Getting this far means we have two existing trees to merge...
#ifdef NV_EXTENSIONS
    numShaderRecordNVBlocks += unit.numShaderRecordNVBlocks;
    numTaskNVBlocks += unit.numTaskNVBlocks;
#endif

    // Get the top-level globals of each unit
    TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
    TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();

    // Get the linker-object lists
    TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
    const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // Map by global name to unique ID to rationalize the same object having
    // differing IDs in different trees.
    TMap<TString, int> idMap;
    int maxId;
    seedIdMap(idMap, maxId);
    remapIds(idMap, maxId + 1, unit);

    mergeBodies(infoSink, globals, unitGlobals);
    mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects);
    ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
}

// Traverser that seeds an ID map with all built-ins, and tracks the
// maximum ID used.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TBuiltInIdTraverser : public TIntermTraverser {
public:
    TBuiltInIdTraverser(TMap<TString, int>& idMap) : idMap(idMap), maxId(0) { }
    // If it's a built-in, add it to the map.
    // Track the max ID.
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        if (qualifier.builtIn != EbvNone)
            idMap[symbol->getName()] = symbol->getId();
        maxId = std::max(maxId, symbol->getId());
    }
    int getMaxId() const { return maxId; }
protected:
    TBuiltInIdTraverser(TBuiltInIdTraverser&);
    TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
    TMap<TString, int>& idMap;
    int maxId;
};

// Traverser that seeds an ID map with non-builtins.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TUserIdTraverser : public TIntermTraverser {
public:
    TUserIdTraverser(TMap<TString, int>& idMap) : idMap(idMap) { }
    // If it's a non-built-in global, add it to the map.
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        if (qualifier.builtIn == EbvNone)
            idMap[symbol->getName()] = symbol->getId();
    }

protected:
    TUserIdTraverser(TUserIdTraverser&);
    TUserIdTraverser& operator=(TUserIdTraverser&);
    TMap<TString, int>& idMap;
};

// Initialize the ID map with what we know of 'this' AST.
void TIntermediate::seedIdMap(TMap<TString, int>& idMap, int& maxId)
{
    // all built-ins everywhere need to align on IDs and contribute to the max ID
    TBuiltInIdTraverser builtInIdTraverser(idMap);
    treeRoot->traverse(&builtInIdTraverser);
    maxId = builtInIdTraverser.getMaxId();

    // user variables in the linker object list need to align on ids
    TUserIdTraverser userIdTraverser(idMap);
    findLinkerObjects()->traverse(&userIdTraverser);
}

// Traverser to map an AST ID to what was known from the seeding AST.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TRemapIdTraverser : public TIntermTraverser {
public:
    TRemapIdTraverser(const TMap<TString, int>& idMap, int idShift) : idMap(idMap), idShift(idShift) { }
    // Do the mapping:
    //  - if the same symbol, adopt the 'this' ID
    //  - otherwise, ensure a unique ID by shifting to a new space
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        bool remapped = false;
        if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
            auto it = idMap.find(symbol->getName());
            if (it != idMap.end()) {
                symbol->changeId(it->second);
                remapped = true;
            }
        }
        if (!remapped)
            symbol->changeId(symbol->getId() + idShift);
    }
protected:
    TRemapIdTraverser(TRemapIdTraverser&);
    TRemapIdTraverser& operator=(TRemapIdTraverser&);
    const TMap<TString, int>& idMap;
    int idShift;
};

void TIntermediate::remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate& unit)
{
    // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
    TRemapIdTraverser idTraverser(idMap, idShift);
    unit.getTreeRoot()->traverse(&idTraverser);
}
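
// Worked example (hypothetical IDs): suppose seedIdMap() produced
// idMap = { "gl_Position" -> 7 } with maxId = 20, so remapIds() is called with
// idShift = 21. A unit symbol named gl_Position (say, ID 3) is remapped to the
// shared ID 7, while an unshared linkable symbol with ID 5 is shifted to 26,
// keeping it unique across the merged tree.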

//
// Merge the function bodies and global-level initializers from unitGlobals into globals.
// Will error check duplication of function bodies for the same signature.
//
void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
{
    // TODO: link-time performance: Processing in alphabetical order will be faster

    // Error check the global objects, not including the linker objects
    for (unsigned int child = 0; child < globals.size() - 1; ++child) {
        for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
            TIntermAggregate* body = globals[child]->getAsAggregate();
            TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
            if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction &&
                body->getName() == unitBody->getName()) {
                error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
                infoSink.info << "    " << globals[child]->getAsAggregate()->getName() << "\n";
            }
        }
    }

    // Merge the global objects, just in front of the linker objects
    globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
}
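
// For example (hypothetical units): if two compilation units each define
// float f(float), both function aggregates carry the same mangled name, so the
// nested loop above reports the duplicate-body error and prints that name.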

//
// Merge the linker objects from unitLinkerObjects into linkerObjects.
// Duplication is expected and filtered out, but contradictions are an error.
//
void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects)
{
    // Error check and merge the linker objects (duplicates should not be created)
    std::size_t initialNumLinkerObjects = linkerObjects.size();
    for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
        bool merge = true;
        for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
            TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
            TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
            assert(symbol && unitSymbol);
            if (symbol->getName() == unitSymbol->getName()) {
                // filter out copy
                merge = false;

                // but if one has an initializer and the other does not, update
                // the initializer
                if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
                    symbol->setConstArray(unitSymbol->getConstArray());

                // Similarly for binding
                if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
                    symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;

                // Update implicit array sizes
                mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());

                // Check for consistent types/qualification/initializers etc.
                mergeErrorCheck(infoSink, *symbol, *unitSymbol, false);
            }
        }
        if (merge)
            linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);
    }
}

// TODO 4.5 link functionality: cull distance array size checking

// Recursively merge the implicit array sizes through the objects' respective type trees.
void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
{
    if (type.isUnsizedArray()) {
        if (unitType.isUnsizedArray()) {
            type.updateImplicitArraySize(unitType.getImplicitArraySize());
            if (unitType.isArrayVariablyIndexed())
                type.setArrayVariablyIndexed();
        } else if (unitType.isSizedArray())
            type.changeOuterArraySize(unitType.getOuterArraySize());
    }

    // Type mismatches are caught and reported after this, just be careful for now.
    if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())
        return;

    for (int i = 0; i < (int)type.getStruct()->size(); ++i)
        mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
}
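
// For example (hypothetical declarations): if one unit leaves
//     out float data[];    // implicitly sized by its highest index used
// unsized while the other declares out float data[8];, the unsized side adopts
// the explicit outer size of 8; when both are unsized, the larger implicit
// size propagates via updateImplicitArraySize().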

//
// Compare two global objects from two compilation units and see if they match
// well enough. Rules can be different for intra- vs. cross-stage matching.
//
// This function only does one of intra- or cross-stage matching per call.
//
void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage)
{
    bool writeTypeComparison = false;

    // Types have to match
    if (symbol.getType() != unitSymbol.getType()) {
        // but, we make an exception if one is an implicit array and the other is sized
        if (! (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
               symbol.getType().sameElementType(unitSymbol.getType()) &&
               (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()))) {
            error(infoSink, "Types must match:");
            writeTypeComparison = true;
        }
    }

    // Qualifiers have to (almost) match

    // Storage...
    if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
        error(infoSink, "Storage qualifiers must match:");
        writeTypeComparison = true;
    }

    // Precision...
    if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
        error(infoSink, "Precision qualifiers must match:");
        writeTypeComparison = true;
    }

    // Invariance...
    if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
        error(infoSink, "Presence of invariant qualifier must match:");
        writeTypeComparison = true;
    }

    // Precise...
    if (! crossStage && symbol.getQualifier().noContraction != unitSymbol.getQualifier().noContraction) {
        error(infoSink, "Presence of precise qualifier must match:");
        writeTypeComparison = true;
    }

    // Auxiliary and interpolation...
    if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid ||
        symbol.getQualifier().smooth   != unitSymbol.getQualifier().smooth ||
        symbol.getQualifier().flat     != unitSymbol.getQualifier().flat ||
        symbol.getQualifier().sample   != unitSymbol.getQualifier().sample ||
        symbol.getQualifier().patch    != unitSymbol.getQualifier().patch ||
        symbol.getQualifier().nopersp  != unitSymbol.getQualifier().nopersp) {
        error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
        writeTypeComparison = true;
    }

    // Memory...
    if (symbol.getQualifier().coherent            != unitSymbol.getQualifier().coherent ||
        symbol.getQualifier().devicecoherent      != unitSymbol.getQualifier().devicecoherent ||
        symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent ||
        symbol.getQualifier().workgroupcoherent   != unitSymbol.getQualifier().workgroupcoherent ||
        symbol.getQualifier().subgroupcoherent    != unitSymbol.getQualifier().subgroupcoherent ||
        symbol.getQualifier().nonprivate          != unitSymbol.getQualifier().nonprivate ||
        symbol.getQualifier().volatil             != unitSymbol.getQualifier().volatil ||
        symbol.getQualifier().restrict            != unitSymbol.getQualifier().restrict ||
        symbol.getQualifier().readonly            != unitSymbol.getQualifier().readonly ||
        symbol.getQualifier().writeonly           != unitSymbol.getQualifier().writeonly) {
        error(infoSink, "Memory qualifiers must match:");
        writeTypeComparison = true;
    }

    // Layouts...
    // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
    //       requires separate user-supplied offset from actual computed offset, but
    //       current implementation only has one offset.
    if (symbol.getQualifier().layoutMatrix    != unitSymbol.getQualifier().layoutMatrix ||
        symbol.getQualifier().layoutPacking   != unitSymbol.getQualifier().layoutPacking ||
        symbol.getQualifier().layoutLocation  != unitSymbol.getQualifier().layoutLocation ||
        symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent ||
        symbol.getQualifier().layoutIndex     != unitSymbol.getQualifier().layoutIndex ||
        symbol.getQualifier().layoutBinding   != unitSymbol.getQualifier().layoutBinding ||
        (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) {
        error(infoSink, "Layout qualification must match:");
        writeTypeComparison = true;
    }

    // Initializers have to match, if both are present, and if we don't already know the types don't match
    if (! writeTypeComparison) {
        if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
            if (symbol.getConstArray() != unitSymbol.getConstArray()) {
                error(infoSink, "Initializers must match:");
                infoSink.info << "    " << symbol.getName() << "\n";
            }
        }
    }

    if (writeTypeComparison)
        infoSink.info << "    " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" <<
                         unitSymbol.getType().getCompleteString() << "\"\n";
}
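
// For example (hypothetical globals): declaring highp float x; in one unit and
// mediump float x; in another trips the precision check above, and the final
// type-comparison line then prints both complete type strings for 'x'.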

//
// Do final link-time error checking of a complete (merged) intermediate representation.
// (Much error checking was done during merging).
//
// Also, lock in defaults of things not set, including array sizes.
//
void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
{
    if (getTreeRoot() == nullptr)
        return;

    if (numEntryPoints < 1) {
        if (source == EShSourceGlsl)
            error(infoSink, "Missing entry point: Each stage requires one entry point");
        else
            warn(infoSink, "Entry point not found");
    }

    if (numPushConstants > 1)
        error(infoSink, "Only one push_constant block is allowed per stage");

    // recursion and missing body checking
    checkCallGraphCycles(infoSink);
    checkCallGraphBodies(infoSink, keepUncalled);

    // overlap/alias/missing I/O, etc.
    inOutLocationCheck(infoSink);

    // invocations
    if (invocations == TQualifier::layoutNotSet)
        invocations = 1;

    if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
    if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_CullDistance is preferred)");

    if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
        error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
    if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
        error(infoSink, "Cannot use both gl_FragColor and gl_FragData");

    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        if (xfbBuffers[b].containsDouble)
            RoundToPow2(xfbBuffers[b].implicitStride, 8);

        // "It is a compile-time or link-time error to have any xfb_offset that overflows xfb_stride,
        // whether stated on declarations before or after the xfb_stride, or in different compilation
        // units. While xfb_stride can be declared multiple times for the same buffer, it is a
        // compile-time or link-time error to have different values specified for the stride for the same buffer."
        if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
            error(infoSink, "xfb_stride is too small to hold all buffer entries:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride
                          << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
        }
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = xfbBuffers[b].implicitStride;

        // "If the buffer is capturing any outputs with double-precision components, the stride must
        // be a multiple of 8, otherwise it must be a multiple of 4, or a compile-time or link-time
        // error results."
        if (xfbBuffers[b].containsDouble && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
            error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        } else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
            error(infoSink, "xfb_stride must be multiple of 4:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }

        // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
        // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
        if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
            error(infoSink, "xfb_stride is too large:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are "
                          << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is "
                          << resources.maxTransformFeedbackInterleavedComponents << "\n";
        }
    }
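
    // Worked example: capturing a single float at xfb_offset 4 yields an
    // implicit stride of 8 (offset 4 + size 4). An explicit xfb_stride of 4 on
    // that buffer would trigger the "too small" error above, while with no
    // explicit stride the buffer locks in the implicit 8, which also satisfies
    // the multiple-of-4 rule.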

    switch (language) {
    case EShLangVertex:
        break;
    case EShLangTessControl:
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify an output layout(vertices=...)");
        break;
    case EShLangTessEvaluation:
        if (source == EShSourceGlsl) {
            if (inputPrimitive == ElgNone)
                error(infoSink, "At least one shader must specify an input layout primitive");
            if (vertexSpacing == EvsNone)
                vertexSpacing = EvsEqual;
            if (vertexOrder == EvoNone)
                vertexOrder = EvoCcw;
        }
        break;
    case EShLangGeometry:
        if (inputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an input layout primitive");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        break;
    case EShLangFragment:
        // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
        // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
        // requiring explicit early_fragment_tests
        if (getPostDepthCoverage() && !getEarlyFragmentTests())
            error(infoSink, "post_depth_coverage requires early_fragment_tests");
        break;
    case EShLangCompute:
        break;

#ifdef NV_EXTENSIONS
    case EShLangRayGenNV:
    case EShLangIntersectNV:
    case EShLangAnyHitNV:
    case EShLangClosestHitNV:
    case EShLangMissNV:
    case EShLangCallableNV:
        if (numShaderRecordNVBlocks > 1)
            error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
        break;
    case EShLangMeshNV:
        // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
        if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
            error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
        if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
        if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
        if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
            error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
        if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
            error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        if (primitives == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
        // fall through
    case EShLangTaskNV:
        if (numTaskNVBlocks > 1)
            error(infoSink, "Only one taskNV interface block is allowed per shader");
        break;
#endif

    default:
        error(infoSink, "Unknown Stage.");
        break;
    }

    // Process the tree for any node-specific work.
    class TFinalLinkTraverser : public TIntermTraverser {
    public:
        TFinalLinkTraverser() { }
        virtual ~TFinalLinkTraverser() { }

        virtual void visitSymbol(TIntermSymbol* symbol)
        {
            // Implicitly size arrays.
            // If an unsized array is left as unsized, it effectively
            // becomes run-time sized.
            symbol->getWritableType().adoptImplicitArraySizes(false);
        }
    } finalLinkTraverser;

    treeRoot->traverse(&finalLinkTraverser);
}

//
// See if the call graph contains any static recursion, which is disallowed
// by the specification.
//
void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->currentPath = false;
        call->errorGiven = false;
    }

    //
    // Loop, looking for a new connected subgraph. One subgraph is handled per loop iteration.
    //

    TCall* newRoot;
    do {
        // See if we have unvisited parts of the graph.
        newRoot = 0;
        for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
            if (! call->visited) {
                newRoot = &(*call);
                break;
            }
        }

        // If not, we are done.
        if (! newRoot)
            break;

        // Otherwise, we found a new subgraph, process it:
        // See what all can be reached by this new root, and if any of
        // that is recursive. This is done by depth-first traversals, seeing
        // if a new call is found that was already in the currentPath (a back edge),
        // thereby detecting recursion.
        std::list<TCall*> stack;
        newRoot->currentPath = true; // currentPath will be true iff it is on the stack
        stack.push_back(newRoot);
        while (! stack.empty()) {
            // get a caller
            TCall* call = stack.back();

            // Add to the stack just one callee.
            // This algorithm always terminates, because only !visited and !currentPath causes a push
            // and all pushes change currentPath to true, and all pops change visited to true.
            TGraph::iterator child = callGraph.begin();
            for (; child != callGraph.end(); ++child) {

                // If we already visited this node, its whole subgraph has already been processed, so skip it.
                if (child->visited)
                    continue;

                if (call->callee == child->caller) {
                    if (child->currentPath) {
                        // Then, we found a back edge
                        if (! child->errorGiven) {
                            error(infoSink, "Recursion detected:");
                            infoSink.info << "    " << call->callee << " calling " << child->callee << "\n";
                            child->errorGiven = true;
                            recursive = true;
                        }
                    } else {
                        child->currentPath = true;
                        stack.push_back(&(*child));
                        break;
                    }
                }
            }
            if (child == callGraph.end()) {
                // no more callees, we bottomed out, never look at this node again
                stack.back()->currentPath = false;
                stack.back()->visited = true;
                stack.pop_back();
            }
        } // end while, meaning nothing left to process in this subtree

    } while (newRoot); // redundant loop check; should always exit via the 'break' above
}
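
// For example: given the call-graph edges main -> a, a -> b, and b -> a, the
// depth-first walk pushes those edges in order; examining b's callee finds the
// edge a -> b still marked currentPath, so that back edge is reported once as
// "Recursion detected" and 'recursive' is set.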

//
// See which functions are reachable from the entry point and which have bodies.
// Reachable ones with missing bodies are errors.
// Unreachable bodies are dead code.
//
void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->calleeBodyPosition = -1;
    }

    // The top level of the AST includes function definitions (bodies).
    // Compare these to function calls in the call graph.
    // We'll end up knowing which have bodies, and if so,
    // how to map the call-graph node to the location in the AST.
    TIntermSequence& functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
    std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
    for (int f = 0; f < (int)functionSequence.size(); ++f) {
        glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate();
        if (node && (node->getOp() == glslang::EOpFunction)) {
            if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
                reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
            for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
                if (call->callee == node->getName())
                    call->calleeBodyPosition = f;
            }
        }
    }

    // Start call-graph traversal by visiting the entry point nodes.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
            call->visited = true;
    }

    // Propagate 'visited' through the call-graph to every part of the graph it
    // can reach (seeded with the entry-point setting above).
    bool changed;
    do {
        changed = false;
        for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
            if (call1->visited) {
                for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
                    if (! call2->visited) {
                        if (call1->callee == call2->caller) {
                            changed = true;
                            call2->visited = true;
                        }
                    }
                }
            }
        }
    } while (changed);

    // Any call-graph node set to visited but without a callee body is an error.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->visited) {
            if (call->calleeBodyPosition == -1) {
                error(infoSink, "No function definition (body) found: ");
                infoSink.info << "    " << call->callee << "\n";
            } else
                reachable[call->calleeBodyPosition] = true;
        }
    }

    // Bodies in the AST not reached by the call graph are dead;
    // clear them out, since they can't be reached and also can't
    // be translated further due to possibility of being ill defined.
    if (! keepUncalled) {
        for (int f = 0; f < (int)functionSequence.size(); ++f) {
            if (! reachable[f])
                functionSequence[f] = nullptr;
        }
        functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr),
                               functionSequence.end());
    }
}
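
// For example: if main() calls f() but no unit supplied a body for f, the
// visited node above still has calleeBodyPosition == -1 and "No function
// definition (body) found" is reported; a g() that has a body but is never
// reached from the entry point is pruned here, unless keepUncalled was requested.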

//
// Satisfy rules for location qualifiers on inputs and outputs
//
void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
{
    // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
    bool fragOutWithNoLocation = false;
    int numFragOut = 0;

    // TODO: linker functionality: location collision checking

    TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
    for (size_t i = 0; i < linkObjects.size(); ++i) {
        const TType& type = linkObjects[i]->getAsTyped()->getType();
        const TQualifier& qualifier = type.getQualifier();
        if (language == EShLangFragment) {
            if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
                ++numFragOut;
                if (!qualifier.hasAnyLocation())
                    fragOutWithNoLocation = true;
            }
        }
    }

    if (profile == EEsProfile) {
        if (numFragOut > 1 && fragOutWithNoLocation)
            error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
    }
}

TIntermAggregate* TIntermediate::findLinkerObjects() const
{
    // Get the top-level globals
    TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();

    // Get the last member of the sequences, expected to be the linker-object lists
    assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);

    return globals.back()->getAsAggregate();
}

// See if a variable was both a user-declared output and used.
// Note: the spec discusses writing to one, but this looks at read or write, which
// is more useful, and perhaps the spec should be changed to reflect that.
bool TIntermediate::userOutputUsed() const
{
    const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();

    bool found = false;
    for (size_t i = 0; i < linkerObjects.size(); ++i) {
        const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
        if (symbolNode.getQualifier().storage == EvqVaryingOut &&
            symbolNode.getName().compare(0, 3, "gl_") != 0 &&
            inIoAccessed(symbolNode.getName())) {
            found = true;
            break;
        }
    }

    return found;
}

// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions
// as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
// typeCollision is set to true if there is no direct collision, but the types in the same location
// are different.
//
int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
{
    typeCollision = false;

    int set;
    if (qualifier.isPipeInput())
        set = 0;
    else if (qualifier.isPipeOutput())
        set = 1;
    else if (qualifier.storage == EvqUniform)
        set = 2;
    else if (qualifier.storage == EvqBuffer)
        set = 3;
    else
        return -1;

    int size;
    if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
        if (type.isSizedArray())
            size = type.getCumulativeArraySize();
        else
            size = 1;
    } else {
        // Strip off the outer array dimension for those having an extra one.
        if (type.isArray() && qualifier.isArrayedIo(language)) {
            TType elementType(type, 0);
            size = computeTypeLocationSize(elementType, language);
        } else
            size = computeTypeLocationSize(type, language);
    }

    // Locations, and components within locations.
    //
    // Almost always, dealing with components means a single location is involved.
    // The exception is a dvec3. From the spec:
    //
    // "A dvec3 will consume all four components of the first location and components 0 and 1 of
    // the second location. This leaves components 2 and 3 available for other component-qualified
    // declarations."
    //
    // That means, without ever mentioning a component, a component range
    // for a different location gets specified, if it's not a vertex shader input. (!)
    // (A vertex shader input will show using only one location, even for a dvec3/4.)
    //
    // So, for the case of dvec3, we need two independent ioRanges.
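    //
    // Worked example: layout(location = 4) in dvec3 d; in a non-vertex stage
    // builds the two ranges below -- location 4, components 0..3, plus
    // location 5, components 0..1 -- leaving location 5's components 2..3
    // free for another component-qualified declaration.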

    int collision = -1; // no collision
    if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
        (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
        // Dealing with dvec3 in/out split across two locations.
        // Need two io-ranges.
        // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.

        // First range:
        TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
        TRange componentRange(0, 3);
        TIoRange range(locationRange, componentRange, type.getBasicType(), 0);

        // check for collisions
        collision = checkLocationRange(set, range, type, typeCollision);
        if (collision < 0) {
            usedIo[set].push_back(range);

            // Second range:
            TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
            TRange componentRange2(0, 1);
            TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0);

            // check for collisions
            collision = checkLocationRange(set, range2, type, typeCollision);
            if (collision < 0)
                usedIo[set].push_back(range2);
        }
    } else {
        // Not a dvec3 in/out split across two locations, generic path.
        // Need a single IO-range block.

        TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
        TRange componentRange(0, 3);
        if (qualifier.hasComponent() || type.getVectorSize() > 0) {
            int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
            if (qualifier.hasComponent())
                componentRange.start = qualifier.layoutComponent;
            componentRange.last = componentRange.start + consumedComponents - 1;
        }

        // combine location and component ranges
        TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0);

        // check for collisions, except for vertex inputs on desktop targeting OpenGL
        if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
            collision = checkLocationRange(set, range, type, typeCollision);

        if (collision < 0)
            usedIo[set].push_back(range);
    }

    return collision;
}

// Compare a new (the passed in) 'range' against the existing set, and see
// if there are any collisions.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
{
    for (size_t r = 0; r < usedIo[set].size(); ++r) {
        if (range.overlap(usedIo[set][r])) {
            // there is a collision; pick one
            return std::max(range.location.start, usedIo[set][r].location.start);
        } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) {
            // aliased-type mismatch
            typeCollision = true;
            return std::max(range.location.start, usedIo[set][r].location.start);
        }
    }

    return -1; // no collision
}

// Accumulate bindings and offsets, and check for collisions
// as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
{
    TRange bindingRange(binding, binding);
    TRange offsetRange(offset, offset + numOffsets - 1);
    TOffsetRange range(bindingRange, offsetRange);

    // check for collisions
    for (size_t r = 0; r < usedAtomics.size(); ++r) {
        if (range.overlap(usedAtomics[r])) {
            // there is a collision; pick one
            return std::max(offset, usedAtomics[r].offset.start);
        }
    }

    usedAtomics.push_back(range);

    return -1; // no collision
}

// Accumulate used constant_id values.
//
// Return false if one was already used.
bool TIntermediate::addUsedConstantId(int id)
{
    if (usedConstantId.find(id) != usedConstantId.end())
        return false;

    usedConstantId.insert(id);

    return true;
}

// Recursively figure out how many locations are used up by an input or output type.
// Return the size of type, as measured by "locations".
int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
{
    // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
    // consecutive locations..."
    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        // TODO: are there valid cases of having an unsized array with a location? If so, running this code too early.
        TType elementType(type, 0);
        if (type.isSizedArray()
#ifdef NV_EXTENSIONS
            && !type.getQualifier().isPerView()
#endif
            )
            return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
        else {
#ifdef NV_EXTENSIONS
            // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
            elementType.getQualifier().perViewNV = false;
#endif
            return computeTypeLocationSize(elementType, stage);
        }
    }

    // "The locations consumed by block and structure members are determined by applying the rules above
    // recursively..."
    if (type.isStruct()) {
        int size = 0;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            size += computeTypeLocationSize(memberType, stage);
        }
        return size;
    }

    // ES: "If a shader input is any scalar or vector type, it will consume a single location."

    // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
    // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
    // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
    // consume only a single location, in all stages."
    if (type.isScalar())
        return 1;
    if (type.isVector()) {
        if (stage == EShLangVertex && type.getQualifier().isPipeInput())
            return 1;
        if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
            return 2;
        else
            return 1;
    }

    // "If the declared input is an n x m single- or double-precision matrix, ...
    // The number of locations assigned for each matrix will be the same as
    // for an n-element array of m-component vectors..."
    if (type.isMatrix()) {
        TType columnType(type, 0);
        return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
    }

    assert(0);
    return 1;
}
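
// For example: vec4 v[3] consumes 3 locations, mat4 m consumes 4 (treated as
// four vec4 columns), and a dvec4 consumes 2 locations except as a vertex
// shader input, where it consumes 1.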

// Same as computeTypeLocationSize but for uniforms
int TIntermediate::computeTypeUniformLocationSize(const TType& type)
{
    // "Individual elements of a uniform array are assigned
    // consecutive locations with the first element taking location
    // location."
    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType elementType(type, 0);
        if (type.isSizedArray()) {
            return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
        } else {
            // TODO: are there valid cases of having an implicitly-sized array with a location? If so, running this code too early.
            return computeTypeUniformLocationSize(elementType);
        }
    }

    // "Each subsequent inner-most member or element gets incremental
    // locations for the entire structure or array."
    if (type.isStruct()) {
        int size = 0;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            size += computeTypeUniformLocationSize(memberType);
        }
        return size;
    }

    return 1;
}

// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
int TIntermediate::addXfbBufferOffset(const TType& type)
{
    const TQualifier& qualifier = type.getQualifier();

    assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
    TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];

    // compute the range
    unsigned int size = computeTypeXfbSize(type, buffer.containsDouble);
    buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
    TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);

    // check for collisions
    for (size_t r = 0; r < buffer.ranges.size(); ++r) {
        if (range.overlap(buffer.ranges[r])) {
            // there is a collision; pick an example to return
            return std::max(range.start, buffer.ranges[r].start);
        }
    }

    buffer.ranges.push_back(range);

    return -1; // no collision
}

// Recursively figure out how many bytes of xfb buffer are used by the given type.
// Return the size of type, in bytes.
// Sets containsDouble to true if the type contains a double.
// N.B. Caller must set containsDouble to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& containsDouble) const
{
    // "...if applied to an aggregate containing a double, the offset must also be a multiple of 8,
    // and the space taken in the buffer will be a multiple of 8.
    // ...within the qualified entity, subsequent components are each
    // assigned, in order, to the next available offset aligned to a multiple of
    // that component's size. Aggregate types are flattened down to the component
    // level to get this sequence of components."

    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        assert(type.isSizedArray());
        TType elementType(type, 0);
        return type.getOuterArraySize() * computeTypeXfbSize(elementType, containsDouble);
    }

    if (type.isStruct()) {
        unsigned int size = 0;
        bool structContainsDouble = false;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            // "... if applied to
            // an aggregate containing a double, the offset must also be a multiple of 8,
            // and the space taken in the buffer will be a multiple of 8."
            bool memberContainsDouble = false;
            int memberSize = computeTypeXfbSize(memberType, memberContainsDouble);
            if (memberContainsDouble) {
                structContainsDouble = true;
                RoundToPow2(size, 8);
            }
            size += memberSize;
        }

        if (structContainsDouble) {
            containsDouble = true;
            RoundToPow2(size, 8);
        }
        return size;
    }

    int numComponents;
    if (type.isScalar())
        numComponents = 1;
    else if (type.isVector())
        numComponents = type.getVectorSize();
    else if (type.isMatrix())
        numComponents = type.getMatrixCols() * type.getMatrixRows();
    else {
        assert(0);
        numComponents = 1;
    }

    if (type.getBasicType() == EbtDouble) {
        containsDouble = true;
        return 8 * numComponents;
    } else
        return 4 * numComponents;
}
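
// Worked example: for struct { float f; double d; }, 'f' contributes 4 bytes;
// the double member first rounds the running size up to 8, then adds 8 more;
// and because the struct contains a double, the total (16 bytes) is itself
// rounded to a multiple of 8, with containsDouble reported to the caller.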

const int baseAlignmentVec4Std140 = 16;

// Return the size and alignment of a component of the given type.
// The size is returned in the 'size' parameter.
// Return value is the alignment.
int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
{
    switch (type.getBasicType()) {
    case EbtInt64:
    case EbtUint64:
    case EbtDouble:    size = 8; return 8;
    case EbtFloat16:   size = 2; return 2;
    case EbtInt8:
    case EbtUint8:     size = 1; return 1;
    case EbtInt16:
    case EbtUint16:    size = 2; return 2;
    case EbtReference: size = 8; return 8;
    default:           size = 4; return 4;
    }
}

// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout.
// Operates recursively.
//
// If std140 is true, it does the rounding up to vec4 size required by std140,
// otherwise it does not, yielding std430 rules.
//
// The size is returned in the 'size' parameter.
//
// The stride is only non-0 for arrays or matrices, and is the stride of the
// top-level object nested within the type. E.g., for an array of matrices,
// it is the distance needed between matrices, despite the rules saying the
// stride comes from the flattening down to vectors.
//
// The return value is the alignment of the type.
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
    int alignment;

    bool std140 = layoutPacking == glslang::ElpStd140;
    // When using the std140 storage layout, structures will be laid out in buffer
    // storage with its members stored in monotonically increasing order based on their
    // location in the declaration. A structure and each structure member have a base
    // offset and a base alignment, from which an aligned offset is computed by rounding
    // the base offset up to a multiple of the base alignment. The base offset of the first
    // member of a structure is taken from the aligned offset of the structure itself. The
    // base offset of all other structure members is derived by taking the offset of the
    // last basic machine unit consumed by the previous member and adding one. Each
    // structure member is stored in memory at its aligned offset. The members of a top-
    // level uniform block are laid out in buffer storage by treating the uniform block as
    // a structure with a base offset of zero.
    //
    // 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
    //
    // 2. If the member is a two- or four-component vector with components consuming N basic
    //    machine units, the base alignment is 2N or 4N, respectively.
    //
    // 3. If the member is a three-component vector with components consuming N
    //    basic machine units, the base alignment is 4N.
    //
    // 4. If the member is an array of scalars or vectors, the base alignment and array
    //    stride are set to match the base alignment of a single array element, according
    //    to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
    //    array may have padding at the end; the base offset of the member following
    //    the array is rounded up to the next multiple of the base alignment.
    //
    // 5. If the member is a column-major matrix with C columns and R rows, the
    //    matrix is stored identically to an array of C column vectors with R
    //    components each, according to rule (4).
    //
    // 6. If the member is an array of S column-major matrices with C columns and
    //    R rows, the matrix is stored identically to a row of S X C column vectors
    //    with R components each, according to rule (4).
    //
    // 7. If the member is a row-major matrix with C columns and R rows, the matrix
    //    is stored identically to an array of R row vectors with C components each,
    //    according to rule (4).
    //
    // 8. If the member is an array of S row-major matrices with C columns and R
    //    rows, the matrix is stored identically to a row of S X R row vectors with C
    //    components each, according to rule (4).
    //
    // 9. If the member is a structure, the base alignment of the structure is N, where
    //    N is the largest base alignment value of any of its members, and rounded
    //    up to the base alignment of a vec4. The individual members of this substructure
    //    are then assigned offsets by applying this set of rules recursively,
    //    where the base offset of the first member of the sub-structure is equal to the
    //    aligned offset of the structure. The structure may have padding at the end;
    //    the base offset of the member following the sub-structure is rounded up to
    //    the next multiple of the base alignment of the structure.
    //
    // 10. If the member is an array of S structures, the S elements of the array are laid
    //     out in order, according to rule (9).
    //
    // Assumption for rule 10: the stride is the same as the size of an element.

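    // Illustrative worked example of the rules above (ours, not from the spec),
    // for a hypothetical block
    //     layout(std140) uniform B { float a; vec3 b; float c[2]; };
    // rule 1 puts 'a' at offset 0 (alignment 4); rule 3 gives 'b' alignment 16,
    // so it occupies bytes 16..27; rule 4 rounds the array stride of 'c' up to
    // vec4 size, so its elements land at offsets 32 and 48. Under std430 the
    // stride of 'c' would stay 4, placing its elements at offsets 28 and 32.
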
    stride = 0;
    int dummyStride;

    // rules 4, 6, 8, and 10
    if (type.isArray()) {
        // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType derefType(type, 0);
        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size; // uses full matrix size for stride of an array of matrices (not quite what rules 6/8 say, but what's expected)
                       // uses the assumption for rule 10 in the comment above
        size = stride * type.getOuterArraySize();
        return alignment;
    }

    // rule 9
    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
                                                   (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        // The structure may have padding at the end; the base offset of
        // the member following the sub-structure is rounded up to the next
        // multiple of the base alignment of the structure.
        RoundToPow2(size, maxAlignment);

        return maxAlignment;
    }

    // rule 1
    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    // rules 2 and 3
    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);
        switch (type.getVectorSize()) {
        case 1: // HLSL has this, GLSL does not
            return scalarAlign;
        case 2:
            size *= 2;
            return 2 * scalarAlign;
        default:
            size *= type.getVectorSize();
            return 4 * scalarAlign;
        }
    }

    // rules 5 and 7
    if (type.isMatrix()) {
        // rule 5: deref to row, not to column, meaning the size of the vector is the number of columns instead of the number of rows
        TType derefType(type, 0, rowMajor);

        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size; // use intra-matrix stride for stride of just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }
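    // Illustrative worked example for rules 5/7 above (ours, not from the spec):
    // a column-major mat3 under std140 is treated as an array of vec3 columns;
    // each column has base alignment 16 (rule 3), so the 12-byte column is
    // padded to a 16-byte stride and the matrix occupies 3 * 16 = 48 bytes.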

    assert(0); // all cases should be covered above
    size = baseAlignmentVec4Std140;
    return baseAlignmentVec4Std140;
}

// To aid the basic HLSL rule about crossing vec4 (16-byte) boundaries:
// returns true if a vector of the given size at the given offset would
// improperly straddle one.
bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
{
    if (! type.isVector() || type.isArray())
        return false;

    return size <= 16 ? offset / 16 != (offset + size - 1) / 16
                      : offset % 16 != 0;
}
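// Illustrative example (ours, not from the spec): a 12-byte float3 placed at
// offset 8 occupies bytes 8..19, crossing the 16-byte boundary, so it
// straddles improperly; placed at offset 16, it does not.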

// Compute size and alignment using scalar layout rules (e.g., as used by
// GL_EXT_scalar_block_layout): alignment comes only from the scalar alignment
// of the component type, with no vec4 rounding. Operates recursively, like
// getBaseAlignment().
int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
{
    int alignment;

    stride = 0;
    int dummyStride;

    if (type.isArray()) {
        TType derefType(type, 0);
        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size;
        RoundToPow2(stride, alignment);

        size = stride * (type.getOuterArraySize() - 1) + size;
        return alignment;
    }
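    // Illustrative example of the array case above (ours, not from the spec):
    // a vec3[2] gets element alignment 4 and stride 12 under scalar rules, for
    // 24 bytes total, where std140/std430 would round the stride up to 16.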

    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        int maxAlignment = 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
                                                     (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        return maxAlignment;
    }

    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);

        size *= type.getVectorSize();
        return scalarAlign;
    }

    if (type.isMatrix()) {
        TType derefType(type, 0, rowMajor);

        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size; // use intra-matrix stride for stride of just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0); // all cases should be covered above
    size = 1;
    return 1;
}

// Dispatch to the alignment/size computation that matches the requested packing.
int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
    if (layoutPacking == glslang::ElpScalar) {
        return getScalarAlignment(type, size, stride, rowMajor);
    } else {
        return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
    }
}

} // end namespace glslang