• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright (C) 2013 LunarG, Inc.
3 // Copyright (C) 2017 ARM Limited.
4 // Copyright (C) 2015-2018 Google, Inc.
5 //
6 // All rights reserved.
7 //
8 // Redistribution and use in source and binary forms, with or without
9 // modification, are permitted provided that the following conditions
10 // are met:
11 //
12 //    Redistributions of source code must retain the above copyright
13 //    notice, this list of conditions and the following disclaimer.
14 //
15 //    Redistributions in binary form must reproduce the above
16 //    copyright notice, this list of conditions and the following
17 //    disclaimer in the documentation and/or other materials provided
18 //    with the distribution.
19 //
20 //    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
21 //    contributors may be used to endorse or promote products derived
22 //    from this software without specific prior written permission.
23 //
24 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 // POSSIBILITY OF SUCH DAMAGE.
36 //
37 
38 //
39 // Do link-time merging and validation of intermediate representations.
40 //
41 // Basic model is that during compilation, each compilation unit (shader) is
42 // compiled into one TIntermediate instance.  Then, at link time, multiple
43 // units for the same stage can be merged together, which can generate errors.
44 // Then, after all merging, a single instance of TIntermediate represents
45 // the whole stage.  A final error check can be done on the resulting stage,
46 // even if no merging was done (i.e., the stage was only one compilation unit).
47 //
48 
49 #include "localintermediate.h"
50 #include "../Include/InfoSink.h"
51 #include "SymbolTable.h"
52 
53 namespace glslang {
54 
55 //
56 // Link-time error emitter.
57 //
error(TInfoSink & infoSink,const char * message,EShLanguage unitStage)58 void TIntermediate::error(TInfoSink& infoSink, const char* message, EShLanguage unitStage)
59 {
60     infoSink.info.prefix(EPrefixError);
61     if (unitStage < EShLangCount)
62         infoSink.info << "Linking " << StageName(getStage()) << " and " << StageName(unitStage) << " stages: " << message << "\n";
63     else
64         infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
65 
66     ++numErrors;
67 }
68 
69 // Link-time warning.
warn(TInfoSink & infoSink,const char * message,EShLanguage unitStage)70 void TIntermediate::warn(TInfoSink& infoSink, const char* message, EShLanguage unitStage)
71 {
72     infoSink.info.prefix(EPrefixWarning);
73     if (unitStage < EShLangCount)
74         infoSink.info << "Linking " << StageName(language) << " and " << StageName(unitStage) << " stages: " << message << "\n";
75     else
76         infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
77 }
78 
79 // TODO: 4.4 offset/align:  "Two blocks linked together in the same program with the same block
80 // name must have the exact same set of members qualified with offset and their integral-constant
81 // expression values must be the same, or a link-time error results."
82 
83 //
84 // Merge the information from 'unit' into 'this'
85 //
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
{
    // Order matters: mergeModes() uses treeRoot == nullptr to distinguish the
    // first unit from subsequent merges, so it must run before mergeTrees()
    // adopts or merges the unit's AST.
    mergeCallGraphs(infoSink, unit);
    mergeModes(infoSink, unit);
    mergeTrees(infoSink, unit);
}
92 
93 //
// Check and merge the linker objects (uniforms/buffers) shared between stages.
95 //
mergeUniformObjects(TInfoSink & infoSink,TIntermediate & unit)96 void TIntermediate::mergeUniformObjects(TInfoSink& infoSink, TIntermediate& unit) {
97     if (unit.treeRoot == nullptr || treeRoot == nullptr)
98         return;
99 
100     // Get the linker-object lists
101     TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
102     TIntermSequence unitLinkerObjects = unit.findLinkerObjects()->getSequence();
103 
104     // filter unitLinkerObjects to only contain uniforms
105     auto end = std::remove_if(unitLinkerObjects.begin(), unitLinkerObjects.end(),
106         [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqUniform &&
107                                       node->getAsSymbolNode()->getQualifier().storage != EvqBuffer; });
108     unitLinkerObjects.resize(end - unitLinkerObjects.begin());
109 
110     // merge uniforms and do error checking
111     bool mergeExistingOnly = false;
112     mergeGlobalUniformBlocks(infoSink, unit, mergeExistingOnly);
113     mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
114 }
115 
116 //
117 // do error checking on the shader boundary in / out vars
118 //
checkStageIO(TInfoSink & infoSink,TIntermediate & unit)119 void TIntermediate::checkStageIO(TInfoSink& infoSink, TIntermediate& unit) {
120     if (unit.treeRoot == nullptr || treeRoot == nullptr)
121         return;
122 
123     // Get copies of the linker-object lists
124     TIntermSequence linkerObjects = findLinkerObjects()->getSequence();
125     TIntermSequence unitLinkerObjects = unit.findLinkerObjects()->getSequence();
126 
127     // filter linkerObjects to only contain out variables
128     auto end = std::remove_if(linkerObjects.begin(), linkerObjects.end(),
129         [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqVaryingOut; });
130     linkerObjects.resize(end - linkerObjects.begin());
131 
132     // filter unitLinkerObjects to only contain in variables
133     auto unitEnd = std::remove_if(unitLinkerObjects.begin(), unitLinkerObjects.end(),
134         [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqVaryingIn; });
135     unitLinkerObjects.resize(unitEnd - unitLinkerObjects.begin());
136 
137     // do matching and error checking
138     mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
139 
140     // TODO: final check; make sure that any statically used `in` have matching `out` written to
141 }
142 
mergeCallGraphs(TInfoSink & infoSink,TIntermediate & unit)143 void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
144 {
145     if (unit.getNumEntryPoints() > 0) {
146         if (getNumEntryPoints() > 0)
147             error(infoSink, "can't handle multiple entry points per stage");
148         else {
149             entryPointName = unit.getEntryPointName();
150             entryPointMangledName = unit.getEntryPointMangledName();
151         }
152     }
153     numEntryPoints += unit.getNumEntryPoints();
154 
155     callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
156 }
157 
// Helpers for mergeModes(): MERGE_MAX keeps the larger of the two values;
// MERGE_TRUE adopts the unit's value whenever it is set (nonzero/true).
#define MERGE_MAX(member) member = std::max(member, unit.member)
#define MERGE_TRUE(member) if (unit.member) member = unit.member;
160 
mergeModes(TInfoSink & infoSink,TIntermediate & unit)161 void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
162 {
163     if (language != unit.language)
164         error(infoSink, "stages must match when linking into a single stage");
165 
166     if (getSource() == EShSourceNone)
167         setSource(unit.getSource());
168     if (getSource() != unit.getSource())
169         error(infoSink, "can't link compilation units from different source languages");
170 
171     if (treeRoot == nullptr) {
172         profile = unit.profile;
173         version = unit.version;
174         requestedExtensions = unit.requestedExtensions;
175     } else {
176         if ((isEsProfile()) != (unit.isEsProfile()))
177             error(infoSink, "Cannot cross link ES and desktop profiles");
178         else if (unit.profile == ECompatibilityProfile)
179             profile = ECompatibilityProfile;
180         version = std::max(version, unit.version);
181         requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
182     }
183 
184     MERGE_MAX(spvVersion.spv);
185     MERGE_MAX(spvVersion.vulkanGlsl);
186     MERGE_MAX(spvVersion.vulkan);
187     MERGE_MAX(spvVersion.openGl);
188     MERGE_TRUE(spvVersion.vulkanRelaxed);
189 
190     numErrors += unit.getNumErrors();
191     // Only one push_constant is allowed, mergeLinkerObjects() will ensure the push_constant
192     // is the same for all units.
193     if (numPushConstants > 1 || unit.numPushConstants > 1)
194         error(infoSink, "Only one push_constant block is allowed per stage");
195     numPushConstants = std::min(numPushConstants + unit.numPushConstants, 1);
196 
197     if (unit.invocations != TQualifier::layoutNotSet) {
198         if (invocations == TQualifier::layoutNotSet)
199             invocations = unit.invocations;
200         else if (invocations != unit.invocations)
201             error(infoSink, "number of invocations must match between compilation units");
202     }
203 
204     if (vertices == TQualifier::layoutNotSet)
205         vertices = unit.vertices;
206     else if (unit.vertices != TQualifier::layoutNotSet && vertices != unit.vertices) {
207         if (language == EShLangGeometry || language == EShLangMesh)
208             error(infoSink, "Contradictory layout max_vertices values");
209         else if (language == EShLangTessControl)
210             error(infoSink, "Contradictory layout vertices values");
211         else
212             assert(0);
213     }
214     if (primitives == TQualifier::layoutNotSet)
215         primitives = unit.primitives;
216     else if (primitives != unit.primitives) {
217         if (language == EShLangMesh)
218             error(infoSink, "Contradictory layout max_primitives values");
219         else
220             assert(0);
221     }
222 
223     if (inputPrimitive == ElgNone)
224         inputPrimitive = unit.inputPrimitive;
225     else if (unit.inputPrimitive != ElgNone && inputPrimitive != unit.inputPrimitive)
226         error(infoSink, "Contradictory input layout primitives");
227 
228     if (outputPrimitive == ElgNone)
229         outputPrimitive = unit.outputPrimitive;
230     else if (unit.outputPrimitive != ElgNone && outputPrimitive != unit.outputPrimitive)
231         error(infoSink, "Contradictory output layout primitives");
232 
233     if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
234         error(infoSink, "gl_FragCoord redeclarations must match across shaders");
235 
236     if (vertexSpacing == EvsNone)
237         vertexSpacing = unit.vertexSpacing;
238     else if (vertexSpacing != unit.vertexSpacing)
239         error(infoSink, "Contradictory input vertex spacing");
240 
241     if (vertexOrder == EvoNone)
242         vertexOrder = unit.vertexOrder;
243     else if (vertexOrder != unit.vertexOrder)
244         error(infoSink, "Contradictory triangle ordering");
245 
246     MERGE_TRUE(pointMode);
247 
248     for (int i = 0; i < 3; ++i) {
249         if (unit.localSizeNotDefault[i]) {
250             if (!localSizeNotDefault[i]) {
251                 localSize[i] = unit.localSize[i];
252                 localSizeNotDefault[i] = true;
253             }
254             else if (localSize[i] != unit.localSize[i])
255                 error(infoSink, "Contradictory local size");
256         }
257 
258         if (localSizeSpecId[i] == TQualifier::layoutNotSet)
259             localSizeSpecId[i] = unit.localSizeSpecId[i];
260         else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
261             error(infoSink, "Contradictory local size specialization ids");
262     }
263 
264     MERGE_TRUE(earlyFragmentTests);
265     MERGE_TRUE(postDepthCoverage);
266     MERGE_TRUE(nonCoherentColorAttachmentReadEXT);
267     MERGE_TRUE(nonCoherentDepthAttachmentReadEXT);
268     MERGE_TRUE(nonCoherentStencilAttachmentReadEXT);
269 
270     if (depthLayout == EldNone)
271         depthLayout = unit.depthLayout;
272     else if (depthLayout != unit.depthLayout)
273         error(infoSink, "Contradictory depth layouts");
274 
275     MERGE_TRUE(depthReplacing);
276     MERGE_TRUE(hlslFunctionality1);
277 
278     blendEquations |= unit.blendEquations;
279 
280     MERGE_TRUE(xfbMode);
281 
282     for (size_t b = 0; b < xfbBuffers.size(); ++b) {
283         if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
284             xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
285         else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
286             error(infoSink, "Contradictory xfb_stride");
287         xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
288         if (unit.xfbBuffers[b].contains64BitType)
289             xfbBuffers[b].contains64BitType = true;
290         if (unit.xfbBuffers[b].contains32BitType)
291             xfbBuffers[b].contains32BitType = true;
292         if (unit.xfbBuffers[b].contains16BitType)
293             xfbBuffers[b].contains16BitType = true;
294         // TODO: 4.4 link: enhanced layouts: compare ranges
295     }
296 
297     MERGE_TRUE(multiStream);
298     MERGE_TRUE(layoutOverrideCoverage);
299     MERGE_TRUE(geoPassthroughEXT);
300 
301     for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
302         if (unit.shiftBinding[i] > 0)
303             setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
304     }
305 
306     for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
307         for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
308             setShiftBindingForSet((TResourceType)i, it->second, it->first);
309     }
310 
311     resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());
312 
313     MERGE_TRUE(autoMapBindings);
314     MERGE_TRUE(autoMapLocations);
315     MERGE_TRUE(invertY);
316     MERGE_TRUE(dxPositionW);
317     MERGE_TRUE(debugInfo);
318     MERGE_TRUE(flattenUniformArrays);
319     MERGE_TRUE(useUnknownFormat);
320     MERGE_TRUE(hlslOffsets);
321     MERGE_TRUE(useStorageBuffer);
322     MERGE_TRUE(invariantAll);
323     MERGE_TRUE(hlslIoMapping);
324 
325     // TODO: sourceFile
326     // TODO: sourceText
327     // TODO: processes
328 
329     MERGE_TRUE(needToLegalize);
330     MERGE_TRUE(binaryDoubleOutput);
331     MERGE_TRUE(usePhysicalStorageBuffer);
332 }
333 
334 //
335 // Merge the 'unit' AST into 'this' AST.
336 // That includes rationalizing the unique IDs, which were set up independently,
337 // and might have overlaps that are not the same symbol, or might have different
338 // IDs for what should be the same shared symbol.
339 //
void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
{
    // Nothing to merge from an empty unit.
    if (unit.treeRoot == nullptr)
        return;

    // First tree seen: adopt it wholesale.
    if (treeRoot == nullptr) {
        treeRoot = unit.treeRoot;
        return;
    }

    // Getting this far means we have two existing trees to merge...
    numShaderRecordBlocks += unit.numShaderRecordBlocks;
    numTaskNVBlocks += unit.numTaskNVBlocks;

    // Get the top-level globals of each unit
    TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
    TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();

    // Get the linker-object lists
    TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
    const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // Map by global name to unique ID to rationalize the same object having
    // differing IDs in different trees.
    TIdMaps idMaps;
    long long idShift;
    seedIdMap(idMaps, idShift);
    // Shift unmatched unit symbols past the largest unique ID seen in 'this' tree.
    remapIds(idMaps, idShift + 1, unit);

    // Merge function bodies, then default uniform blocks, then linker objects;
    // finally union the sets of statically accessed I/O names.
    mergeBodies(infoSink, globals, unitGlobals);
    bool mergeExistingOnly = false;
    mergeGlobalUniformBlocks(infoSink, unit, mergeExistingOnly);
    mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
    ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
}
375 
getNameForIdMap(TIntermSymbol * symbol)376 static const TString& getNameForIdMap(TIntermSymbol* symbol)
377 {
378     TShaderInterface si = symbol->getType().getShaderInterface();
379     if (si == EsiNone)
380         return symbol->getName();
381     else
382         return symbol->getType().getTypeName();
383 }
384 
385 
386 
387 // Traverser that seeds an ID map with all built-ins, and tracks the
388 // maximum ID used, currently using (maximum ID + 1) as new symbol id shift seed.
389 // Level id will keep same after shifting.
390 // (It would be nice to put this in a function, but that causes warnings
391 // on having no bodies for the copy-constructor/operator=.)
// Traverser that seeds an ID map with all built-ins, and tracks the
// maximum ID used, currently using (maximum ID + 1) as new symbol id shift seed.
// Level id will keep same after shifting.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TBuiltInIdTraverser : public TIntermTraverser {
public:
    TBuiltInIdTraverser(TIdMaps& idMaps) : idMaps(idMaps), idShift(0) { }
    // If it's a built in, add it to the map.
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        if (qualifier.builtIn != EbvNone) {
            TShaderInterface si = symbol->getType().getShaderInterface();
            idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
        }
        // Track the maximum unique-ID bits seen so far, while carrying along the
        // non-unique (level) bits of the current symbol's ID unchanged.
        idShift = (symbol->getId() & ~TSymbolTable::uniqueIdMask) |
                std::max(idShift & TSymbolTable::uniqueIdMask,
                         symbol->getId() & TSymbolTable::uniqueIdMask);
    }
    // Maximum ID observed; caller masks it to derive the shift seed.
    long long getIdShift() const { return idShift; }
protected:
    TBuiltInIdTraverser(TBuiltInIdTraverser&);
    TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
    TIdMaps& idMaps;
    long long idShift;
};
414 
415 // Traverser that seeds an ID map with non-builtins.
416 // (It would be nice to put this in a function, but that causes warnings
417 // on having no bodies for the copy-constructor/operator=.)
418 class TUserIdTraverser : public TIntermTraverser {
419 public:
TUserIdTraverser(TIdMaps & idMaps)420     TUserIdTraverser(TIdMaps& idMaps) : idMaps(idMaps) { }
421     // If its a non-built-in global, add it to the map.
visitSymbol(TIntermSymbol * symbol)422     virtual void visitSymbol(TIntermSymbol* symbol)
423     {
424         const TQualifier& qualifier = symbol->getType().getQualifier();
425         if (qualifier.builtIn == EbvNone) {
426             TShaderInterface si = symbol->getType().getShaderInterface();
427             idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
428         }
429     }
430 
431 protected:
432     TUserIdTraverser(TUserIdTraverser&);
433     TUserIdTraverser& operator=(TUserIdTraverser&);
434     TIdMaps& idMaps; // over biggest id
435 };
436 
// Initialize the ID map with what we know of 'this' AST.
seedIdMap(TIdMaps & idMaps,long long & idShift)438 void TIntermediate::seedIdMap(TIdMaps& idMaps, long long& idShift)
439 {
440     // all built-ins everywhere need to align on IDs and contribute to the max ID
441     TBuiltInIdTraverser builtInIdTraverser(idMaps);
442     treeRoot->traverse(&builtInIdTraverser);
443     idShift = builtInIdTraverser.getIdShift() & TSymbolTable::uniqueIdMask;
444 
445     // user variables in the linker object list need to align on ids
446     TUserIdTraverser userIdTraverser(idMaps);
447     findLinkerObjects()->traverse(&userIdTraverser);
448 }
449 
450 // Traverser to map an AST ID to what was known from the seeding AST.
451 // (It would be nice to put this in a function, but that causes warnings
452 // on having no bodies for the copy-constructor/operator=.)
// Traverser to map an AST ID to what was known from the seeding AST.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TRemapIdTraverser : public TIntermTraverser {
public:
    TRemapIdTraverser(const TIdMaps& idMaps, long long idShift) : idMaps(idMaps), idShift(idShift) { }
    // Do the mapping:
    //  - if the same symbol, adopt the 'this' ID
    //  - otherwise, ensure a unique ID by shifting to a new space
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        bool remapped = false;
        // Only linkable symbols and built-ins can be shared across units.
        if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
            TShaderInterface si = symbol->getType().getShaderInterface();
            auto it = idMaps[si].find(getNameForIdMap(symbol));
            if (it != idMaps[si].end()) {
                // Keep this symbol's non-unique (level) bits, but adopt the
                // unique-ID bits recorded from the seeding AST.
                uint64_t id = (symbol->getId() & ~TSymbolTable::uniqueIdMask) |
                    (it->second & TSymbolTable::uniqueIdMask);
                symbol->changeId(id);
                remapped = true;
            }
        }
        // Unmatched symbols get shifted into a fresh, non-colliding ID range.
        if (!remapped)
            symbol->changeId(symbol->getId() + idShift);
    }
protected:
    TRemapIdTraverser(TRemapIdTraverser&);
    TRemapIdTraverser& operator=(TRemapIdTraverser&);
    const TIdMaps& idMaps;
    long long idShift;
};
482 
remapIds(const TIdMaps & idMaps,long long idShift,TIntermediate & unit)483 void TIntermediate::remapIds(const TIdMaps& idMaps, long long idShift, TIntermediate& unit)
484 {
485     // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
486     TRemapIdTraverser idTraverser(idMaps, idShift);
487     unit.getTreeRoot()->traverse(&idTraverser);
488 }
489 
490 //
491 // Merge the function bodies and global-level initializers from unitGlobals into globals.
492 // Will error check duplication of function bodies for the same signature.
493 //
void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
{
    // TODO: link-time performance: Processing in alphabetical order will be faster

    // Error check the global objects, not including the linker objects
    // (size() - 1 excludes the trailing linker-object aggregate of each list).
    for (unsigned int child = 0; child < globals.size() - 1; ++child) {
        for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
            TIntermAggregate* body = globals[child]->getAsAggregate();
            TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
            // Two function definitions with the same mangled name is a link error.
            if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) {
                error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
                infoSink.info << "    " << globals[child]->getAsAggregate()->getName() << "\n";
            }
        }
    }

    // Merge the global objects, just in front of the linker objects
    globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
}
513 
isSameInterface(TIntermSymbol * symbol,EShLanguage stage,TIntermSymbol * unitSymbol,EShLanguage unitStage)514 static inline bool isSameInterface(TIntermSymbol* symbol, EShLanguage stage, TIntermSymbol* unitSymbol, EShLanguage unitStage) {
515     return // 1) same stage and same shader interface
516         (stage == unitStage && symbol->getType().getShaderInterface() == unitSymbol->getType().getShaderInterface()) ||
517         // 2) accross stages and both are uniform or buffer
518         (symbol->getQualifier().storage == EvqUniform  && unitSymbol->getQualifier().storage == EvqUniform) ||
519         (symbol->getQualifier().storage == EvqBuffer   && unitSymbol->getQualifier().storage == EvqBuffer) ||
520         // 3) in/out matched across stage boundary
521         (stage < unitStage && symbol->getQualifier().storage == EvqVaryingOut  && unitSymbol->getQualifier().storage == EvqVaryingIn) ||
522         (unitStage < stage && symbol->getQualifier().storage == EvqVaryingIn && unitSymbol->getQualifier().storage == EvqVaryingOut);
523 }
524 
525 //
// The global uniform block stores any default uniforms (i.e. uniforms without a block).
527 // If two linked stages declare the same member, they are meant to be the same uniform
528 // and need to be in the same block
529 // merge the members of different stages to allow them to be linked properly
530 // as a single block
531 //
mergeGlobalUniformBlocks(TInfoSink & infoSink,TIntermediate & unit,bool mergeExistingOnly)532 void TIntermediate::mergeGlobalUniformBlocks(TInfoSink& infoSink, TIntermediate& unit, bool mergeExistingOnly)
533 {
534     TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
535     TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();
536 
537     // build lists of default blocks from the intermediates
538     TIntermSequence defaultBlocks;
539     TIntermSequence unitDefaultBlocks;
540 
541     auto filter = [](TIntermSequence& list, TIntermNode* node) {
542         if (node->getAsSymbolNode()->getQualifier().defaultBlock) {
543             list.push_back(node);
544         }
545     };
546 
547     std::for_each(linkerObjects.begin(), linkerObjects.end(),
548         [&defaultBlocks, &filter](TIntermNode* node) {
549             filter(defaultBlocks, node);
550         });
551     std::for_each(unitLinkerObjects.begin(), unitLinkerObjects.end(),
552         [&unitDefaultBlocks, &filter](TIntermNode* node) {
553             filter(unitDefaultBlocks, node);
554     });
555 
556     auto itUnitBlock = unitDefaultBlocks.begin();
557     for (; itUnitBlock != unitDefaultBlocks.end(); itUnitBlock++) {
558 
559         bool add = !mergeExistingOnly;
560         auto itBlock = defaultBlocks.begin();
561 
562         for (; itBlock != defaultBlocks.end(); itBlock++) {
563             TIntermSymbol* block = (*itBlock)->getAsSymbolNode();
564             TIntermSymbol* unitBlock = (*itUnitBlock)->getAsSymbolNode();
565 
566             assert(block && unitBlock);
567 
568             // if the two default blocks match, then merge their definitions
569             if (block->getType().getTypeName() == unitBlock->getType().getTypeName() &&
570                 block->getQualifier().storage == unitBlock->getQualifier().storage) {
571                 add = false;
572                 mergeBlockDefinitions(infoSink, block, unitBlock, &unit);
573             }
574         }
575         if (add) {
576             // push back on original list; won't change the size of the list we're iterating over
577             linkerObjects.push_back(*itUnitBlock);
578         }
579     }
580 }
581 
// Merge the member list of 'unitBlock' into 'block' (same default block seen
// from two units), then rewrite both trees so every symbol copy of the block
// structure, and every member-dereference index in 'unit', matches the merged
// definition. Type mismatches between same-named members are link errors.
void TIntermediate::mergeBlockDefinitions(TInfoSink& infoSink, TIntermSymbol* block, TIntermSymbol* unitBlock, TIntermediate* unit) {

    if (block->getType().getTypeName() != unitBlock->getType().getTypeName() ||
        block->getType().getBasicType() != unitBlock->getType().getBasicType() ||
        block->getQualifier().storage != unitBlock->getQualifier().storage ||
        block->getQualifier().layoutSet != unitBlock->getQualifier().layoutSet) {
        // different block names likely means different blocks
        return;
    }

    // merge the struct
    // order of declarations doesn't matter and they matched based on member name
    TTypeList* memberList = block->getType().getWritableStruct();
    TTypeList* unitMemberList = unitBlock->getType().getWritableStruct();

    // keep track of which members have changed position
    // so we don't have to search the array again
    std::map<unsigned int, unsigned int> memberIndexUpdates;

    // Only match against the original members of 'block'; members appended
    // below must not be rematched against later unit members.
    size_t memberListStartSize = memberList->size();
    for (unsigned int i = 0; i < unitMemberList->size(); ++i) {
        bool merge = true;
        for (unsigned int j = 0; j < memberListStartSize; ++j) {
            if ((*memberList)[j].type->getFieldName() == (*unitMemberList)[i].type->getFieldName()) {
                merge = false;
                const TType* memberType = (*memberList)[j].type;
                const TType* unitMemberType = (*unitMemberList)[i].type;

                // compare types
                // don't need as many checks as when merging symbols, since
                // initializers and most qualifiers are stripped when the member is moved into the block
                if ((*memberType) != (*unitMemberType)) {
                    error(infoSink, "Types must match:");
                    infoSink.info << "    " << memberType->getFieldName() << ": ";
                    infoSink.info << "\"" << memberType->getCompleteString() << "\" versus ";
                    infoSink.info << "\"" << unitMemberType->getCompleteString() << "\"\n";
                }

                memberIndexUpdates[i] = j;
            }
        }
        if (merge) {
            // New member: append to our list and record its new position.
            memberList->push_back((*unitMemberList)[i]);
            memberIndexUpdates[i] = (unsigned int)memberList->size() - 1;
        }
    }

    // update symbol node in unit tree,
    // and other nodes that may reference it
    class TMergeBlockTraverser : public TIntermTraverser {
    public:
        // Structure-only mode: update block-structure copies on matching symbols.
        TMergeBlockTraverser(const TIntermSymbol* newSym)
            : newSymbol(newSym), newType(nullptr), unit(nullptr), memberIndexUpdates(nullptr)
        {
        }
        // Full mode (post-visit): additionally rewrite member-dereference indices.
        TMergeBlockTraverser(const TIntermSymbol* newSym, const glslang::TType* unitType, glslang::TIntermediate* unit,
                             const std::map<unsigned int, unsigned int>* memberIdxUpdates)
            : TIntermTraverser(false, true), newSymbol(newSym), newType(unitType), unit(unit), memberIndexUpdates(memberIdxUpdates)
        {
        }
        virtual ~TMergeBlockTraverser() {}

        const TIntermSymbol* newSymbol;
        const glslang::TType* newType; // shallow copy of the new type
        glslang::TIntermediate* unit;   // intermediate that is being updated
        const std::map<unsigned int, unsigned int>* memberIndexUpdates;

        virtual void visitSymbol(TIntermSymbol* symbol)
        {
            if (newSymbol->getAccessName() == symbol->getAccessName() &&
                newSymbol->getQualifier().getBlockStorage() == symbol->getQualifier().getBlockStorage()) {
                // Each symbol node may have a local copy of the block structure.
                // Update those structures to match the new one post-merge
                *(symbol->getWritableType().getWritableStruct()) = *(newSymbol->getType().getStruct());
            }
        }

        virtual bool visitBinary(TVisit, glslang::TIntermBinary* node)
        {
            if (!unit || !newType || !memberIndexUpdates || memberIndexUpdates->empty())
                return true;

            if (node->getOp() == EOpIndexDirectStruct && node->getLeft()->getType() == *newType) {
                // this is a dereference to a member of the block since the
                // member list changed, need to update this to point to the
                // right index
                assert(node->getRight()->getAsConstantUnion());

                glslang::TIntermConstantUnion* constNode = node->getRight()->getAsConstantUnion();
                unsigned int memberIdx = constNode->getConstArray()[0].getUConst();
                unsigned int newIdx = memberIndexUpdates->at(memberIdx);
                TIntermTyped* newConstNode = unit->addConstantUnion(newIdx, node->getRight()->getLoc());

                node->setRight(newConstNode);
                delete constNode;

                return true;
            }
            return true;
        }
    };

    // 'this' may have symbols that are using the old block structure, so traverse the tree to update those
    // in 'visitSymbol'
    TMergeBlockTraverser finalLinkTraverser(block);
    getTreeRoot()->traverse(&finalLinkTraverser);

    // The 'unit' intermediate needs the block structures update, but also structure entry indices
    // may have changed from the old block to the new one that it was merged into, so update those
    // in 'visitBinary'
    TType newType;
    newType.shallowCopy(block->getType());
    TMergeBlockTraverser unitFinalLinkTraverser(block, &newType, unit, &memberIndexUpdates);
    unit->getTreeRoot()->traverse(&unitFinalLinkTraverser);

    // update the member list
    (*unitMemberList) = (*memberList);
}
700 
701 //
702 // Merge the linker objects from unitLinkerObjects into linkerObjects.
703 // Duplication is expected and filtered out, but contradictions are an error.
704 //
void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects, EShLanguage unitStage)
{
    // Error check and merge the linker objects (duplicates should not be created)
    // Only compare against the objects present before this merge began; objects
    // appended below are new by construction and need no duplicate check.
    std::size_t initialNumLinkerObjects = linkerObjects.size();
    for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
        bool merge = true;
        for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
            TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
            TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
            assert(symbol && unitSymbol);

            bool isSameSymbol = false;
            // If they are both blocks in the same shader interface,
            // match by the block-name, not the identifier name.
            if (symbol->getType().getBasicType() == EbtBlock && unitSymbol->getType().getBasicType() == EbtBlock) {
                if (isSameInterface(symbol, getStage(), unitSymbol, unitStage)) {
                    isSameSymbol = symbol->getType().getTypeName() == unitSymbol->getType().getTypeName();
                }
            }
            else if (symbol->getName() == unitSymbol->getName())
                isSameSymbol = true;

            if (isSameSymbol) {
                // filter out copy: keep the existing node, do not append the unit's
                merge = false;

                // but if one has an initializer and the other does not, update
                // the initializer
                if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
                    symbol->setConstArray(unitSymbol->getConstArray());

                // Similarly for binding
                if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
                    symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;

                // Similarly for location
                if (!symbol->getQualifier().hasLocation() && unitSymbol->getQualifier().hasLocation()) {
                    symbol->getQualifier().layoutLocation = unitSymbol->getQualifier().layoutLocation;
                }

                // Update implicit array sizes
                if (symbol->getWritableType().isImplicitlySizedArray() && unitSymbol->getType().isImplicitlySizedArray()) {
                    // Both implicitly sized: keep the larger implied size.
                    if (unitSymbol->getType().getImplicitArraySize() > symbol->getType().getImplicitArraySize()){
                        symbol->getWritableType().updateImplicitArraySize(unitSymbol->getType().getImplicitArraySize());
                    }
                }
                else if (symbol->getWritableType().isImplicitlySizedArray() && unitSymbol->getType().isSizedArray()) {
                    // One side explicitly sized: the implicit usage must fit within it.
                    if (symbol->getWritableType().getImplicitArraySize() > unitSymbol->getType().getOuterArraySize())
                        error(infoSink, "Implicit size of unsized array doesn't match same symbol among multiple shaders.");
                }
                else if (unitSymbol->getType().isImplicitlySizedArray() && symbol->getWritableType().isSizedArray()) {
                    if (unitSymbol->getType().getImplicitArraySize() > symbol->getWritableType().getOuterArraySize())
                        error(infoSink, "Implicit size of unsized array doesn't match same symbol among multiple shaders.");
                }

                // Update implicit array sizes recursively through the whole type tree
                mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());

                // Check for consistent types/qualification/initializers etc.
                mergeErrorCheck(infoSink, *symbol, *unitSymbol, unitStage);
            }
            // If different symbols, verify they aren't push_constant since there can only be one per stage
            else if (symbol->getQualifier().isPushConstant() && unitSymbol->getQualifier().isPushConstant() && getStage() == unitStage)
                error(infoSink, "Only one push_constant block is allowed per stage");
        }

        // Check conflicts between preset primitives and sizes of I/O variables among multiple geometry shaders
        if (language == EShLangGeometry && unitStage == EShLangGeometry)
        {
            TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
            // Only user-declared (non-built-in) varying-in arrays participate in this check.
            if (unitSymbol->isArray() && unitSymbol->getQualifier().storage == EvqVaryingIn && unitSymbol->getQualifier().builtIn == EbvNone)
                if ((unitSymbol->getArraySizes()->isImplicitlySized() &&
                        unitSymbol->getArraySizes()->getImplicitSize() != TQualifier::mapGeometryToSize(getInputPrimitive())) ||
                    (! unitSymbol->getArraySizes()->isImplicitlySized() &&
                        unitSymbol->getArraySizes()->getDimSize(0) != TQualifier::mapGeometryToSize(getInputPrimitive())))
                    error(infoSink, "Not all array sizes match across all geometry shaders in the program");
        }

        if (merge) {
            linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);

            // for anonymous blocks, check that their members don't conflict with other names
            if (unitLinkerObjects[unitLinkObj]->getAsSymbolNode()->getBasicType() == EbtBlock &&
                IsAnonymous(unitLinkerObjects[unitLinkObj]->getAsSymbolNode()->getName())) {
                for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
                    TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
                    TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
                    assert(symbol && unitSymbol);

                    // Reports an error when 'name' collides with a member of the
                    // anonymous unit block, unless either the member or the block
                    // itself carries an explicit location qualifier.
                    auto checkName = [this, unitSymbol, &infoSink](const TString& name) {
                        for (unsigned int i = 0; i < unitSymbol->getType().getStruct()->size(); ++i) {
                            if (name == (*unitSymbol->getType().getStruct())[i].type->getFieldName()
                                && !((*unitSymbol->getType().getStruct())[i].type->getQualifier().hasLocation()
                                    || unitSymbol->getType().getQualifier().hasLocation())
                                ) {
                                error(infoSink, "Anonymous member name used for global variable or other anonymous member: ");
                                infoSink.info << (*unitSymbol->getType().getStruct())[i].type->getCompleteString() << "\n";
                            }
                        }
                    };

                    if (isSameInterface(symbol, getStage(), unitSymbol, unitStage)) {
                        checkName(symbol->getName());

                        // check members of other anonymous blocks
                        if (symbol->getBasicType() == EbtBlock && IsAnonymous(symbol->getName())) {
                            for (unsigned int i = 0; i < symbol->getType().getStruct()->size(); ++i) {
                                checkName((*symbol->getType().getStruct())[i].type->getFieldName());
                            }
                        }
                    }
                }
            }
        }
    }
}
821 
822 // TODO 4.5 link functionality: cull distance array size checking
823 
824 // Recursively merge the implicit array sizes through the objects' respective type trees.
mergeImplicitArraySizes(TType & type,const TType & unitType)825 void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
826 {
827     if (type.isUnsizedArray()) {
828         if (unitType.isUnsizedArray()) {
829             type.updateImplicitArraySize(unitType.getImplicitArraySize());
830             if (unitType.isArrayVariablyIndexed())
831                 type.setArrayVariablyIndexed();
832         } else if (unitType.isSizedArray())
833             type.changeOuterArraySize(unitType.getOuterArraySize());
834     }
835 
836     // Type mismatches are caught and reported after this, just be careful for now.
837     if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())
838         return;
839 
840     for (int i = 0; i < (int)type.getStruct()->size(); ++i)
841         mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
842 }
843 
844 //
845 // Compare two global objects from two compilation units and see if they match
846 // well enough.  Rules can be different for intra- vs. cross-stage matching.
847 //
848 // This function only does one of intra- or cross-stage matching per call.
849 //
void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, EShLanguage unitStage)
{
    // True when comparing symbols from two different stages; several rules
    // (invariant, precise, interpolation/auxiliary) only apply intra-stage.
    bool crossStage = getStage() != unitStage;
    // These flags accumulate which parts of the type-comparison summary to
    // print at the end of the function.
    bool writeTypeComparison = false;
    bool errorReported = false;
    bool printQualifiers = false;
    bool printPrecision = false;
    bool printType = false;

    // Types have to match
    {
        // but, we make an exception if one is an implicit array and the other is sized
        // or if the array sizes differ because of the extra array dimension on some in/out boundaries
        bool arraysMatch = false;
        if (isIoResizeArray(symbol.getType(), getStage()) || isIoResizeArray(unitSymbol.getType(), unitStage)) {
            // if the arrays have an extra dimension because of the stage.
            // compare dimensions while ignoring the outer dimension
            unsigned int firstDim = isIoResizeArray(symbol.getType(), getStage()) ? 1 : 0;
            unsigned int numDim = symbol.getArraySizes()
                ? symbol.getArraySizes()->getNumDims() : 0;
            unsigned int unitFirstDim = isIoResizeArray(unitSymbol.getType(), unitStage) ? 1 : 0;
            unsigned int unitNumDim = unitSymbol.getArraySizes()
                ? unitSymbol.getArraySizes()->getNumDims() : 0;
            arraysMatch = (numDim - firstDim) == (unitNumDim - unitFirstDim);
            // check that array sizes match as well
            for (unsigned int i = 0; i < (numDim - firstDim) && arraysMatch; i++) {
                if (symbol.getArraySizes()->getDimSize(firstDim + i) !=
                    unitSymbol.getArraySizes()->getDimSize(unitFirstDim + i)) {
                    arraysMatch = false;
                    break;
                }
            }
        }
        else {
            // Arrays match if their arrayness is identical, or if both are arrays
            // and at least one of them is implicitly sized / unsized.
            arraysMatch = symbol.getType().sameArrayness(unitSymbol.getType()) ||
                (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
                 (symbol.getType().isImplicitlySizedArray() || unitSymbol.getType().isImplicitlySizedArray() ||
                  symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()));
        }

        // lpidx/rpidx receive the indices of the first mismatching block members
        // on each side (-1 when not applicable), as reported by sameElementType.
        int lpidx = -1;
        int rpidx = -1;
        if (!symbol.getType().sameElementType(unitSymbol.getType(), &lpidx, &rpidx)) {
            if (lpidx >= 0 && rpidx >= 0) {
                // Same-index members exist on both sides but differ.
                error(infoSink, "Member names and types must match:", unitStage);
                infoSink.info << "    Block: " << symbol.getType().getTypeName() << "\n";
                infoSink.info << "        " << StageName(getStage()) << " stage: \""
                              << (*symbol.getType().getStruct())[lpidx].type->getCompleteString(true, false, false, true,
                                      (*symbol.getType().getStruct())[lpidx].type->getFieldName()) << "\"\n";
                infoSink.info << "        " << StageName(unitStage) << " stage: \""
                              << (*unitSymbol.getType().getStruct())[rpidx].type->getCompleteString(true, false, false, true,
                                      (*unitSymbol.getType().getStruct())[rpidx].type->getFieldName()) << "\"\n";
                errorReported = true;
            } else if (lpidx >= 0 && rpidx == -1) {
                  // This stage's block has a member the unit stage's block lacks.
                  TString errmsg = StageName(getStage());
                  errmsg.append(" block member has no corresponding member in ").append(StageName(unitStage)).append(" block:");
                  error(infoSink, errmsg.c_str(), unitStage);
                  infoSink.info << "    " << StageName(getStage()) << " stage: Block: " << symbol.getType().getTypeName() << ", Member: "
                    << (*symbol.getType().getStruct())[lpidx].type->getFieldName() << "\n";
                  infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << ", Member: n/a \n";
                  errorReported = true;
            } else if (lpidx == -1 && rpidx >= 0) {
                  // The unit stage's block has a member this stage's block lacks.
                  TString errmsg = StageName(unitStage);
                  errmsg.append(" block member has no corresponding member in ").append(StageName(getStage())).append(" block:");
                  error(infoSink, errmsg.c_str(), unitStage);
                  infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << ", Member: "
                    << (*unitSymbol.getType().getStruct())[rpidx].type->getFieldName() << "\n";
                  infoSink.info << "    " << StageName(getStage()) << " stage: Block: " << symbol.getType().getTypeName() << ", Member: n/a \n";
                  errorReported = true;
            } else {
                  // Non-block (or otherwise unindexed) element-type mismatch.
                  error(infoSink, "Types must match:", unitStage);
                  writeTypeComparison = true;
                  printType = true;
            }
        } else if (!arraysMatch) {
            error(infoSink, "Array sizes must be compatible:", unitStage);
            writeTypeComparison = true;
            printType = true;
        } else if (!symbol.getType().sameTypeParameters(unitSymbol.getType())) {
            error(infoSink, "Type parameters must match:", unitStage);
            writeTypeComparison = true;
            printType = true;
        }
    }

    // Interface block  member-wise layout qualifiers have to match
    if (symbol.getType().getBasicType() == EbtBlock && unitSymbol.getType().getBasicType() == EbtBlock &&
        symbol.getType().getStruct() && unitSymbol.getType().getStruct() &&
        symbol.getType().sameStructType(unitSymbol.getType())) {
        // Walk both member lists in parallel, skipping hidden members on either side.
        unsigned int li = 0;
        unsigned int ri = 0;
        while (li < symbol.getType().getStruct()->size() && ri < unitSymbol.getType().getStruct()->size()) {
            if ((*symbol.getType().getStruct())[li].type->hiddenMember()) {
                ++li;
                continue;
            }
            if ((*unitSymbol.getType().getStruct())[ri].type->hiddenMember()) {
                ++ri;
                continue;
            }
            const TQualifier& qualifier = (*symbol.getType().getStruct())[li].type->getQualifier();
            const TQualifier & unitQualifier = (*unitSymbol.getType().getStruct())[ri].type->getQualifier();
            bool layoutQualifierError = false;
            if (qualifier.layoutMatrix != unitQualifier.layoutMatrix) {
                error(infoSink, "Interface block member layout matrix qualifier must match:", unitStage);
                layoutQualifierError = true;
            }
            if (qualifier.layoutOffset != unitQualifier.layoutOffset) {
                error(infoSink, "Interface block member layout offset qualifier must match:", unitStage);
                layoutQualifierError = true;
            }
            if (qualifier.layoutAlign != unitQualifier.layoutAlign) {
                error(infoSink, "Interface block member layout align qualifier must match:", unitStage);
                layoutQualifierError = true;
            }
            if (qualifier.layoutLocation != unitQualifier.layoutLocation) {
                error(infoSink, "Interface block member layout location qualifier must match:", unitStage);
                layoutQualifierError = true;
            }
            if (qualifier.layoutComponent != unitQualifier.layoutComponent) {
                error(infoSink, "Interface block member layout component qualifier must match:", unitStage);
                layoutQualifierError = true;
            }
            if (layoutQualifierError) {
                // Print both members once, even if several qualifiers mismatched.
                infoSink.info << "    " << StageName(getStage()) << " stage: Block: " << symbol.getType().getTypeName() << ", Member: "
                              << (*symbol.getType().getStruct())[li].type->getFieldName() << " \""
                              << (*symbol.getType().getStruct())[li].type->getCompleteString(true, true, false, false) << "\"\n";
                infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << ", Member: "
                              << (*unitSymbol.getType().getStruct())[ri].type->getFieldName() << " \""
                              << (*unitSymbol.getType().getStruct())[ri].type->getCompleteString(true, true, false, false) << "\"\n";
                errorReported = true;
            }
            ++li;
            ++ri;
        }
    }

    // True when this is a legitimate cross-stage out -> in (or in <- out) pairing,
    // which relaxes the storage and precision matching rules below.
    bool isInOut = crossStage &&
                   ((symbol.getQualifier().storage == EvqVaryingIn && unitSymbol.getQualifier().storage == EvqVaryingOut) ||
                   (symbol.getQualifier().storage == EvqVaryingOut && unitSymbol.getQualifier().storage == EvqVaryingIn));

    // Qualifiers have to (almost) match
    // Storage...
    if (!isInOut && symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
        error(infoSink, "Storage qualifiers must match:", unitStage);
        writeTypeComparison = true;
        printQualifiers = true;
    }

    // Uniform and buffer blocks must either both have an instance name, or
    // must both be anonymous. The names don't need to match though.
    if (symbol.getQualifier().isUniformOrBuffer() &&
        (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()))) {
        error(infoSink, "Matched Uniform or Storage blocks must all be anonymous,"
                        " or all be named:", unitStage);
        writeTypeComparison = true;
    }

    // Differing instance names on a matched interface is only a warning.
    if (symbol.getQualifier().storage == unitSymbol.getQualifier().storage &&
        (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()) ||
         (!IsAnonymous(symbol.getName()) && symbol.getName() != unitSymbol.getName()))) {
        warn(infoSink, "Matched shader interfaces are using different instance names.", unitStage);
        writeTypeComparison = true;
    }

    // Precision...
    if (!isInOut && symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
        error(infoSink, "Precision qualifiers must match:", unitStage);
        writeTypeComparison = true;
        printPrecision = true;
    }

    // Invariance...
    if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
        error(infoSink, "Presence of invariant qualifier must match:", unitStage);
        writeTypeComparison = true;
        printQualifiers = true;
    }

    // Precise...
    if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) {
        error(infoSink, "Presence of precise qualifier must match:", unitStage);
        writeTypeComparison = true;
        printPrecision = true;
    }

    // Auxiliary and interpolation...
    // "interpolation qualification (e.g., flat) and auxiliary qualification (e.g. centroid) may differ.
    //  These mismatches are allowed between any pair of stages ...
    //  those provided in the fragment shader supersede those provided in previous stages."
    if (!crossStage &&
        (symbol.getQualifier().centroid  != unitSymbol.getQualifier().centroid ||
        symbol.getQualifier().smooth    != unitSymbol.getQualifier().smooth ||
        symbol.getQualifier().flat      != unitSymbol.getQualifier().flat ||
        symbol.getQualifier().isSample()!= unitSymbol.getQualifier().isSample() ||
        symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() ||
        symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective())) {
        error(infoSink, "Interpolation and auxiliary storage qualifiers must match:", unitStage);
        writeTypeComparison = true;
        printQualifiers = true;
    }

    // Memory...
    bool memoryQualifierError = false;
    if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent) {
        error(infoSink, "Memory coherent qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent) {
        error(infoSink, "Memory devicecoherent qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent) {
        error(infoSink, "Memory queuefamilycoherent qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent) {
        error(infoSink, "Memory workgroupcoherent qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent) {
        error(infoSink, "Memory subgroupcoherent qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().shadercallcoherent != unitSymbol.getQualifier().shadercallcoherent) {
        error(infoSink, "Memory shadercallcoherent qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate) {
        error(infoSink, "Memory nonprivate qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil) {
        error(infoSink, "Memory volatil qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict) {
        error(infoSink, "Memory restrict qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly) {
        error(infoSink, "Memory readonly qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
        error(infoSink, "Memory writeonly qualifier must match:", unitStage);
        memoryQualifierError = true;
    }
    if (memoryQualifierError) {
          writeTypeComparison = true;
          printQualifiers = true;
    }

    // Layouts...
    // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
    //       requires separate user-supplied offset from actual computed offset, but
    //       current implementation only has one offset.
    bool layoutQualifierError = false;
    if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix) {
        error(infoSink, "Layout matrix qualifier must match:", unitStage);
        layoutQualifierError = true;
    }
    if (symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking) {
        error(infoSink, "Layout packing qualifier must match:", unitStage);
        layoutQualifierError = true;
    }
    // Location/binding are only compared when both sides set one explicitly.
    if (symbol.getQualifier().hasLocation() && unitSymbol.getQualifier().hasLocation() && symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation) {
        error(infoSink, "Layout location qualifier must match:", unitStage);
        layoutQualifierError = true;
    }
    if (symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent) {
        error(infoSink, "Layout component qualifier must match:", unitStage);
        layoutQualifierError = true;
    }
    if (symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex) {
        error(infoSink, "Layout index qualifier must match:", unitStage);
        layoutQualifierError = true;
    }
    if (symbol.getQualifier().hasBinding() && unitSymbol.getQualifier().hasBinding() && symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding) {
        error(infoSink, "Layout binding qualifier must match:", unitStage);
        layoutQualifierError = true;
    }
    if (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset)) {
        error(infoSink, "Layout offset qualifier must match:", unitStage);
        layoutQualifierError = true;
    }
    if (layoutQualifierError) {
        writeTypeComparison = true;
        printQualifiers = true;
    }

    // Initializers have to match, if both are present, and if we don't already know the types don't match
    if (! writeTypeComparison && ! errorReported) {
        if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
            if (symbol.getConstArray() != unitSymbol.getConstArray()) {
                error(infoSink, "Initializers must match:", unitStage);
                infoSink.info << "    " << symbol.getName() << "\n";
            }
        }
    }

    // Emit the accumulated side-by-side type comparison for any errors above
    // that requested it; the format differs for blocks vs. plain symbols.
    if (writeTypeComparison) {
        if (symbol.getType().getBasicType() == EbtBlock && unitSymbol.getType().getBasicType() == EbtBlock &&
            symbol.getType().getStruct() && unitSymbol.getType().getStruct()) {
          if (printType) {
            infoSink.info << "    " << StageName(getStage()) << " stage: \"" << symbol.getType().getCompleteString(true, printQualifiers, printPrecision,
                                                    printType, symbol.getName(), symbol.getType().getTypeName()) << "\"\n";
            infoSink.info << "    " << StageName(unitStage) << " stage: \"" << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision,
                                                    printType, unitSymbol.getName(), unitSymbol.getType().getTypeName()) << "\"\n";
          } else {
            infoSink.info << "    " << StageName(getStage()) << " stage: Block: " << symbol.getType().getTypeName() << " Instance: " << symbol.getName()
              << ": \"" << symbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
            infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << " Instance: " << unitSymbol.getName()
              << ": \"" << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
          }
        } else {
          if (printType) {
            infoSink.info << "    " << StageName(getStage()) << " stage: \""
              << symbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType, symbol.getName()) << "\"\n";
            infoSink.info << "    " << StageName(unitStage) << " stage: \""
              << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType, unitSymbol.getName()) << "\"\n";
          } else {
            infoSink.info << "    " << StageName(getStage()) << " stage: " << symbol.getName() << " \""
              << symbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
            infoSink.info << "    " << StageName(unitStage) << " stage: " << unitSymbol.getName() << " \""
              << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
          }
        }
    }
}
1180 
sharedBlockCheck(TInfoSink & infoSink)1181 void TIntermediate::sharedBlockCheck(TInfoSink& infoSink)
1182 {
1183     bool has_shared_block = false;
1184     bool has_shared_non_block = false;
1185     TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
1186     for (size_t i = 0; i < linkObjects.size(); ++i) {
1187         const TType& type = linkObjects[i]->getAsTyped()->getType();
1188         const TQualifier& qualifier = type.getQualifier();
1189         if (qualifier.storage == glslang::EvqShared) {
1190             if (type.getBasicType() == glslang::EbtBlock)
1191                 has_shared_block = true;
1192             else
1193                 has_shared_non_block = true;
1194         }
1195     }
1196     if (has_shared_block && has_shared_non_block)
1197         error(infoSink, "cannot mix use of shared variables inside and outside blocks");
1198 }
1199 
1200 //
1201 // Do final link-time error checking of a complete (merged) intermediate representation.
1202 // (Much error checking was done during merging).
1203 //
1204 // Also, lock in defaults of things not set, including array sizes.
1205 //
// Do final link-time error checking of a complete (merged) intermediate representation.
// (Much error checking was done during merging.)
//
// infoSink:     sink receiving errors/warnings.
// keepUncalled: when false, function bodies not reachable from the entry point
//               are pruned from the AST (see checkCallGraphBodies).
void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
{
    // Nothing to check if there is no AST at all.
    if (getTreeRoot() == nullptr)
        return;

    // GLSL demands exactly one entry point per stage; other sources (e.g. HLSL)
    // only warn, since the entry point may be selected differently.
    if (numEntryPoints < 1) {
        if (getSource() == EShSourceGlsl)
            error(infoSink, "Missing entry point: Each stage requires one entry point");
        else
            warn(infoSink, "Entry point not found");
    }

    // recursion and missing body checking
    checkCallGraphCycles(infoSink);
    checkCallGraphBodies(infoSink, keepUncalled);

    // overlap/alias/missing I/O, etc.
    inOutLocationCheck(infoSink);

    if (getNumPushConstants() > 1)
        error(infoSink, "Only one push_constant block is allowed per stage");

    // invocations: lock in the default of 1 if no shader set it
    if (invocations == TQualifier::layoutNotSet)
        invocations = 1;

    // Mutually exclusive built-in usage checks.
    if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
    if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)");

    if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
        error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
    if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
        error(infoSink, "Cannot use both gl_FragColor and gl_FragData");

    // Transform feedback: finalize each buffer's implicit stride and validate
    // any explicitly declared stride against it.
    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        // Pad the implicit stride up to the alignment of the widest captured type.
        if (xfbBuffers[b].contains64BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 8);
        else if (xfbBuffers[b].contains32BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 4);
        else if (xfbBuffers[b].contains16BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 2);

        // "It is a compile-time or link-time error to have
        // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
        // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
        // compile-time or link-time error to have different values specified for the stride for the same buffer."
        if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
            error(infoSink, "xfb_stride is too small to hold all buffer entries:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
        }
        // No explicit stride declared: the implicit stride becomes the stride.
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = xfbBuffers[b].implicitStride;

        // "If the buffer is capturing any
        // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
        // multiple of 4, or a compile-time or link-time error results."
        if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
            error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
            error(infoSink, "xfb_stride must be multiple of 4:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }
        // "If the buffer is capturing any
        // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
        else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
            error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }

        // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
        // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
        if (xfbBuffers[b].stride > (unsigned int)(4 * resources->maxTransformFeedbackInterleavedComponents)) {
            error(infoSink, "xfb_stride is too large:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources->maxTransformFeedbackInterleavedComponents << "\n";
        }
    }

    // Per-stage requirements: layout declarations that at least one shader in
    // the stage must have supplied, plus stage-specific usage rules.
    switch (language) {
    case EShLangVertex:
        break;
    case EShLangTessControl:
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify an output layout(vertices=...)");
        break;
    case EShLangTessEvaluation:
        if (getSource() == EShSourceGlsl) {
            if (inputPrimitive == ElgNone)
                error(infoSink, "At least one shader must specify an input layout primitive");
            // Lock in spec-mandated defaults for spacing and winding order.
            if (vertexSpacing == EvsNone)
                vertexSpacing = EvsEqual;
            if (vertexOrder == EvoNone)
                vertexOrder = EvoCcw;
        }
        break;
    case EShLangGeometry:
        if (inputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an input layout primitive");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        break;
    case EShLangFragment:
        // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
        // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
        // requiring explicit early_fragment_tests
        if (getPostDepthCoverage() && !getEarlyFragmentTests())
            error(infoSink, "post_depth_coverage requires early_fragment_tests");
        break;
    case EShLangCompute:
        sharedBlockCheck(infoSink);
        break;
    case EShLangRayGen:
    case EShLangIntersect:
    case EShLangAnyHit:
    case EShLangClosestHit:
    case EShLangMiss:
    case EShLangCallable:
        if (numShaderRecordBlocks > 1)
            error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
        break;
    case EShLangMesh:
        // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
        if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
            error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
        if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
        if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
        if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
            error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
        if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
            error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        if (primitives == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
        // Mesh shares the task-stage checks below on purpose.
        [[fallthrough]];
    case EShLangTask:
        if (numTaskNVBlocks > 1)
            error(infoSink, "Only one taskNV interface block is allowed per shader");
        if (numTaskEXTPayloads > 1)
            error(infoSink, "Only single variable of type taskPayloadSharedEXT is allowed per shader");
        sharedBlockCheck(infoSink);
        break;
    default:
        error(infoSink, "Unknown Stage.");
        break;
    }

    // Process the tree for any node-specific work.
    class TFinalLinkTraverser : public TIntermTraverser {
    public:
        TFinalLinkTraverser() { }
        virtual ~TFinalLinkTraverser() { }

        virtual void visitSymbol(TIntermSymbol* symbol)
        {
            // Implicitly size arrays.
            // If an unsized array is left as unsized, it effectively
            // becomes run-time sized.
            symbol->getWritableType().adoptImplicitArraySizes(false);
        }
    } finalLinkTraverser;

    treeRoot->traverse(&finalLinkTraverser);
}
1383 
1384 //
1385 // See if the call graph contains any static recursion, which is disallowed
1386 // by the specification.
1387 //
// See if the call graph contains any static recursion, which is disallowed
// by the specification. Emits an error per distinct back edge found and
// sets 'recursive' when any recursion exists.
void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->currentPath = false;
        call->errorGiven = false;
    }

    //
    // Loop, looking for a new connected subgraph.  One subgraph is handled per loop iteration.
    //

    TCall* newRoot;
    do {
        // See if we have unvisited parts of the graph.
        newRoot = nullptr;
        for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
            if (! call->visited) {
                newRoot = &(*call);
                break;
            }
        }

        // If not, we are done.
        if (! newRoot)
            break;

        // Otherwise, we found a new subgraph, process it:
        // See what all can be reached by this new root, and if any of
        // that is recursive.  This is done by depth-first traversals, seeing
        // if a new call is found that was already in the currentPath (a back edge),
        // thereby detecting recursion.
        std::list<TCall*> stack;  // explicit DFS stack (iterative, not recursive)
        newRoot->currentPath = true; // currentPath will be true iff it is on the stack
        stack.push_back(newRoot);
        while (! stack.empty()) {
            // get a caller
            TCall* call = stack.back();

            // Add to the stack just one callee.
            // This algorithm always terminates, because only !visited and !currentPath causes a push
            // and all pushes change currentPath to true, and all pops change visited to true.
            TGraph::iterator child = callGraph.begin();
            for (; child != callGraph.end(); ++child) {

                // If we already visited this node, its whole subgraph has already been processed, so skip it.
                if (child->visited)
                    continue;

                // Follow the edge: the current node's callee is the child's caller.
                if (call->callee == child->caller) {
                    if (child->currentPath) {
                        // Then, we found a back edge
                        if (! child->errorGiven) {
                            error(infoSink, "Recursion detected:");
                            infoSink.info << "    " << call->callee << " calling " << child->callee << "\n";
                            child->errorGiven = true;  // report each back edge only once
                            recursive = true;
                        }
                    } else {
                        child->currentPath = true;
                        stack.push_back(&(*child));
                        break;  // descend into this child before trying siblings
                    }
                }
            }
            if (child == callGraph.end()) {
                // no more callees, we bottomed out, never look at this node again
                stack.back()->currentPath = false;
                stack.back()->visited = true;
                stack.pop_back();
            }
        }  // end while, meaning nothing left to process in this subtree

    } while (newRoot);  // redundant loop check; should always exit via the 'break' above
}
1464 
1465 //
1466 // See which functions are reachable from the entry point and which have bodies.
1467 // Reachable ones with missing bodies are errors.
1468 // Unreachable bodies are dead code.
1469 //
// See which functions are reachable from the entry point and which have bodies.
// Reachable ones with missing bodies are errors.
// Unreachable bodies are dead code; they are removed from the AST unless
// 'keepUncalled' is true.
void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->calleeBodyPosition = -1;
    }

    // The top level of the AST includes function definitions (bodies).
    // Compare these to function calls in the call graph.
    // We'll end up knowing which have bodies, and if so,
    // how to map the call-graph node to the location in the AST.
    TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
    std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
    for (int f = 0; f < (int)functionSequence.size(); ++f) {
        glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate();
        if (node && (node->getOp() == glslang::EOpFunction)) {
            // The entry point itself is always reachable.
            if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
                reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
            // Record where each call-graph callee's body sits in the AST.
            for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
                if (call->callee == node->getName())
                    call->calleeBodyPosition = f;
            }
        }
    }

    // Start call-graph traversal by visiting the entry point nodes.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
            call->visited = true;
    }

    // Propagate 'visited' through the call-graph to every part of the graph it
    // can reach (seeded with the entry-point setting above).
    // Fixed-point iteration: repeat until a pass makes no change.
    bool changed;
    do {
        changed = false;
        for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
            if (call1->visited) {
                for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
                    if (! call2->visited) {
                        if (call1->callee == call2->caller) {
                            changed = true;
                            call2->visited = true;
                        }
                    }
                }
            }
        }
    } while (changed);

    // Any call-graph node set to visited but without a callee body is an error.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->visited) {
            if (call->calleeBodyPosition == -1) {
                error(infoSink, "No function definition (body) found: ");
                infoSink.info << "    " << call->callee << "\n";
            } else
                reachable[call->calleeBodyPosition] = true;
        }
    }

    // Bodies in the AST not reached by the call graph are dead;
    // clear them out, since they can't be reached and also can't
    // be translated further due to possibility of being ill defined.
    if (! keepUncalled) {
        for (int f = 0; f < (int)functionSequence.size(); ++f) {
            if (! reachable[f])
            {
                // Only function bodies can be unreachable here (non-functions
                // were pre-marked reachable), so getAsAggregate() is valid.
                resetTopLevelUncalledStatus(functionSequence[f]->getAsAggregate()->getName());
                functionSequence[f] = nullptr;
            }
        }
        // Compact the sequence with the erase-remove idiom.
        functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end());
    }
}
1546 
1547 //
1548 // Satisfy rules for location qualifiers on inputs and outputs
1549 //
inOutLocationCheck(TInfoSink & infoSink)1550 void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
1551 {
1552     // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
1553     bool fragOutWithNoLocation = false;
1554     int numFragOut = 0;
1555 
1556     // TODO: linker functionality: location collision checking
1557 
1558     TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
1559     for (size_t i = 0; i < linkObjects.size(); ++i) {
1560         const TType& type = linkObjects[i]->getAsTyped()->getType();
1561         const TQualifier& qualifier = type.getQualifier();
1562         if (language == EShLangFragment) {
1563             if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
1564                 ++numFragOut;
1565                 if (!qualifier.hasAnyLocation())
1566                     fragOutWithNoLocation = true;
1567             }
1568         }
1569     }
1570 
1571     if (isEsProfile()) {
1572         if (numFragOut > 1 && fragOutWithNoLocation)
1573             error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
1574     }
1575 }
1576 
findLinkerObjects() const1577 TIntermAggregate* TIntermediate::findLinkerObjects() const
1578 {
1579     // Get the top-level globals
1580     TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
1581 
1582     // Get the last member of the sequences, expected to be the linker-object lists
1583     assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);
1584 
1585     return globals.back()->getAsAggregate();
1586 }
1587 
1588 // See if a variable was both a user-declared output and used.
1589 // Note: the spec discusses writing to one, but this looks at read or write, which
1590 // is more useful, and perhaps the spec should be changed to reflect that.
userOutputUsed() const1591 bool TIntermediate::userOutputUsed() const
1592 {
1593     const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
1594 
1595     bool found = false;
1596     for (size_t i = 0; i < linkerObjects.size(); ++i) {
1597         const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
1598         if (symbolNode.getQualifier().storage == EvqVaryingOut &&
1599             symbolNode.getName().compare(0, 3, "gl_") != 0 &&
1600             inIoAccessed(symbolNode.getName())) {
1601             found = true;
1602             break;
1603         }
1604     }
1605 
1606     return found;
1607 }
1608 
1609 // Accumulate locations used for inputs, outputs, and uniforms, payload, callable data, and tileImageEXT
1610 // and check for collisions as the accumulation is done.
1611 //
1612 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1613 //
1614 // typeCollision is set to true if there is no direct collision, but the types in the same location
1615 // are different.
1616 //
// Accumulate locations used for inputs, outputs, uniforms, buffers, payloads,
// callable data, and tileImageEXT, and check for collisions as the
// accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
// typeCollision is set to true if there is no direct collision, but the types in the same
// location are different.
int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
{
    typeCollision = false;

    // Pick which tracking set this object belongs to. Note the ray-tracing
    // qualifiers (payload/callable/hitObjectAttr) reuse indices 0-2, but they
    // are recorded in the separate usedIoRT table, so they don't clash with
    // the pipe-I/O/uniform sets of usedIo.
    int set;
    if (qualifier.isPipeInput())
        set = 0;
    else if (qualifier.isPipeOutput())
        set = 1;
    else if (qualifier.storage == EvqUniform)
        set = 2;
    else if (qualifier.storage == EvqBuffer)
        set = 3;
    else if (qualifier.storage == EvqTileImageEXT)
        set = 4;
    else if (qualifier.isAnyPayload())
        set = 0;
    else if (qualifier.isAnyCallable())
        set = 1;
    else if (qualifier.isHitObjectAttrNV())
        set = 2;
    else
        return -1;  // not a location-consuming object; nothing to track

    // Compute how many locations this object consumes.
    int size;
    if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
        size = 1;  // RT payloads/callables occupy one slot regardless of type
    } else if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
        if (type.isSizedArray())
            size = type.getCumulativeArraySize();
        else
            size = 1;
    } else {
        // Strip off the outer array dimension for those having an extra one.
        if (type.isArray() && qualifier.isArrayedIo(language)) {
            TType elementType(type, 0);
            size = computeTypeLocationSize(elementType, language);
        } else
            size = computeTypeLocationSize(type, language);
    }

    // Locations, and components within locations.
    //
    // Almost always, dealing with components means a single location is involved.
    // The exception is a dvec3. From the spec:
    //
    // "A dvec3 will consume all four components of the first location and components 0 and 1 of
    // the second location. This leaves components 2 and 3 available for other component-qualified
    // declarations."
    //
    // That means, without ever mentioning a component, a component range
    // for a different location gets specified, if it's not a vertex shader input. (!)
    // (A vertex shader input will show using only one location, even for a dvec3/4.)
    //
    // So, for the case of dvec3, we need two independent ioRanges.
    //
    // For raytracing IO (payloads and callabledata) each declaration occupies a single
    // slot irrespective of type.
    int collision = -1; // no collision
    if (qualifier.isAnyPayload() || qualifier.isAnyCallable() || qualifier.isHitObjectAttrNV()) {
        TRange range(qualifier.layoutLocation, qualifier.layoutLocation);
        collision = checkLocationRT(set, qualifier.layoutLocation);
        if (collision < 0)
            usedIoRT[set].push_back(range);
        return collision;
    }
    if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
        (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
        // Dealing with dvec3 in/out split across two locations.
        // Need two io-ranges.
        // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.

        // First range: all four components of the first location.
        TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
        TRange componentRange(0, 3);
        TIoRange range(locationRange, componentRange, type.getBasicType(), 0, qualifier.centroid, qualifier.smooth, qualifier.flat);

        // check for collisions
        collision = checkLocationRange(set, range, type, typeCollision);
        if (collision < 0) {
            usedIo[set].push_back(range);

            // Second range: components 0-1 of the following location.
            TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
            TRange componentRange2(0, 1);
            TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0, qualifier.centroid, qualifier.smooth, qualifier.flat);

            // check for collisions
            collision = checkLocationRange(set, range2, type, typeCollision);
            if (collision < 0)
                usedIo[set].push_back(range2);
        }
        return collision;
    }

    // Not a dvec3 in/out split across two locations, generic path.
    // Need a single IO-range block.

    TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
    TRange componentRange(0, 3);
    if (qualifier.hasComponent() || type.getVectorSize() > 0) {
        // Doubles consume two components each.
        int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
        if (qualifier.hasComponent())
            componentRange.start = qualifier.layoutComponent;
        componentRange.last  = componentRange.start + consumedComponents - 1;
    }

    // combine location and component ranges
    // For attachmentEXT samplers, collide on the underlying sampled type.
    TBasicType basicTy = type.getBasicType();
    if (basicTy == EbtSampler && type.getSampler().isAttachmentEXT())
        basicTy = type.getSampler().type;
    TIoRange range(locationRange, componentRange, basicTy, qualifier.hasIndex() ? qualifier.getIndex() : 0, qualifier.centroid, qualifier.smooth, qualifier.flat);

    // check for collisions, except for vertex inputs on desktop targeting OpenGL
    if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
        collision = checkLocationRange(set, range, type, typeCollision);

    if (collision < 0)
        usedIo[set].push_back(range);

    return collision;
}
1739 
1740 // Compare a new (the passed in) 'range' against the existing set, and see
1741 // if there are any collisions.
1742 //
1743 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1744 //
// Compare a new (the passed in) 'range' against the existing set, and see
// if there are any collisions.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
// Sets typeCollision when the locations only alias (no component overlap) but
// the aliased declarations disagree on basic type or interpolation qualifiers.
int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
{
    for (size_t r = 0; r < usedIo[set].size(); ++r) {
        if (range.overlap(usedIo[set][r])) {
            // there is a collision; pick one
            return std::max(range.location.start, usedIo[set][r].location.start);
        } else if (range.location.overlap(usedIo[set][r].location) &&
                   (type.getBasicType() != usedIo[set][r].basicType ||
                    type.getQualifier().centroid != usedIo[set][r].centroid ||
                    type.getQualifier().smooth != usedIo[set][r].smooth ||
                    type.getQualifier().flat != usedIo[set][r].flat)) {
            // aliased-type mismatch: same location, different components, but
            // conflicting type or interpolation
            typeCollision = true;
            return std::max(range.location.start, usedIo[set][r].location.start);
        }
    }

    // check typeCollision between tileImageEXT and out
    if (set == 4 || set == 1) {
      // if the set is "tileImageEXT", check against "out" and vice versa
      int againstSet = (set == 4) ? 1 : 4;
      for (size_t r = 0; r < usedIo[againstSet].size(); ++r) {
        if (range.location.overlap(usedIo[againstSet][r].location) && type.getBasicType() != usedIo[againstSet][r].basicType) {
            // aliased-type mismatch
            typeCollision = true;
            return std::max(range.location.start, usedIo[againstSet][r].location.start);
        }
      }
    }

    return -1; // no collision
}
1777 
checkLocationRT(int set,int location)1778 int TIntermediate::checkLocationRT(int set, int location) {
1779     TRange range(location, location);
1780     for (size_t r = 0; r < usedIoRT[set].size(); ++r) {
1781         if (range.overlap(usedIoRT[set][r])) {
1782             return range.start;
1783         }
1784     }
1785     return -1; // no collision
1786 }
1787 
1788 // Accumulate bindings and offsets, and check for collisions
1789 // as the accumulation is done.
1790 //
1791 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1792 //
addUsedOffsets(int binding,int offset,int numOffsets)1793 int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
1794 {
1795     TRange bindingRange(binding, binding);
1796     TRange offsetRange(offset, offset + numOffsets - 1);
1797     TOffsetRange range(bindingRange, offsetRange);
1798 
1799     // check for collisions, except for vertex inputs on desktop
1800     for (size_t r = 0; r < usedAtomics.size(); ++r) {
1801         if (range.overlap(usedAtomics[r])) {
1802             // there is a collision; pick one
1803             return std::max(offset, usedAtomics[r].offset.start);
1804         }
1805     }
1806 
1807     usedAtomics.push_back(range);
1808 
1809     return -1; // no collision
1810 }
1811 
1812 // Accumulate used constant_id values.
1813 //
1814 // Return false is one was already used.
addUsedConstantId(int id)1815 bool TIntermediate::addUsedConstantId(int id)
1816 {
1817     if (usedConstantId.find(id) != usedConstantId.end())
1818         return false;
1819 
1820     usedConstantId.insert(id);
1821 
1822     return true;
1823 }
1824 
1825 // Recursively figure out how many locations are used up by an input or output type.
1826 // Return the size of type, as measured by "locations".
computeTypeLocationSize(const TType & type,EShLanguage stage)1827 int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
1828 {
1829     // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
1830     // consecutive locations..."
1831     if (type.isArray()) {
1832         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1833         // TODO: are there valid cases of having an unsized array with a location?  If so, running this code too early.
1834         TType elementType(type, 0);
1835         if (type.isSizedArray() && !type.getQualifier().isPerView())
1836             return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
1837         else {
1838             // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
1839             elementType.getQualifier().perViewNV = false;
1840             return computeTypeLocationSize(elementType, stage);
1841         }
1842     }
1843 
1844     // "The locations consumed by block and structure members are determined by applying the rules above
1845     // recursively..."
1846     if (type.isStruct()) {
1847         int size = 0;
1848         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1849             TType memberType(type, member);
1850             size += computeTypeLocationSize(memberType, stage);
1851         }
1852         return size;
1853     }
1854 
1855     // ES: "If a shader input is any scalar or vector type, it will consume a single location."
1856 
1857     // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
1858     // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
1859     // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
1860     // consume only a single location, in all stages."
1861     if (type.isScalar())
1862         return 1;
1863     if (type.isVector()) {
1864         if (stage == EShLangVertex && type.getQualifier().isPipeInput())
1865             return 1;
1866         if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
1867             return 2;
1868         else
1869             return 1;
1870     }
1871 
1872     // "If the declared input is an n x m single- or double-precision matrix, ...
1873     // The number of locations assigned for each matrix will be the same as
1874     // for an n-element array of m-component vectors..."
1875     if (type.isMatrix()) {
1876         TType columnType(type, 0);
1877         return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
1878     }
1879 
1880     assert(0);
1881     return 1;
1882 }
1883 
1884 // Same as computeTypeLocationSize but for uniforms
computeTypeUniformLocationSize(const TType & type)1885 int TIntermediate::computeTypeUniformLocationSize(const TType& type)
1886 {
1887     // "Individual elements of a uniform array are assigned
1888     // consecutive locations with the first element taking location
1889     // location."
1890     if (type.isArray()) {
1891         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1892         TType elementType(type, 0);
1893         if (type.isSizedArray()) {
1894             return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
1895         } else {
1896             // TODO: are there valid cases of having an implicitly-sized array with a location?  If so, running this code too early.
1897             return computeTypeUniformLocationSize(elementType);
1898         }
1899     }
1900 
1901     // "Each subsequent inner-most member or element gets incremental
1902     // locations for the entire structure or array."
1903     if (type.isStruct()) {
1904         int size = 0;
1905         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1906             TType memberType(type, member);
1907             size += computeTypeUniformLocationSize(memberType);
1908         }
1909         return size;
1910     }
1911 
1912     return 1;
1913 }
1914 
1915 // Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
1916 //
1917 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1918 //
addXfbBufferOffset(const TType & type)1919 int TIntermediate::addXfbBufferOffset(const TType& type)
1920 {
1921     const TQualifier& qualifier = type.getQualifier();
1922 
1923     assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
1924     TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
1925 
1926     // compute the range
1927     unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
1928     buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
1929     TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
1930 
1931     // check for collisions
1932     for (size_t r = 0; r < buffer.ranges.size(); ++r) {
1933         if (range.overlap(buffer.ranges[r])) {
1934             // there is a collision; pick an example to return
1935             return std::max(range.start, buffer.ranges[r].start);
1936         }
1937     }
1938 
1939     buffer.ranges.push_back(range);
1940 
1941     return -1;  // no collision
1942 }
1943 
1944 // Recursively figure out how many bytes of xfb buffer are used by the given type.
1945 // Return the size of type, in bytes.
1946 // Sets contains64BitType to true if the type contains a 64-bit data type.
1947 // Sets contains32BitType to true if the type contains a 32-bit data type.
1948 // Sets contains16BitType to true if the type contains a 16-bit data type.
1949 // N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
computeTypeXfbSize(const TType & type,bool & contains64BitType,bool & contains32BitType,bool & contains16BitType) const1950 unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
1951 {
1952     // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
1953     // and the space taken in the buffer will be a multiple of 8.
1954     // ...within the qualified entity, subsequent components are each
1955     // assigned, in order, to the next available offset aligned to a multiple of
1956     // that component's size.  Aggregate types are flattened down to the component
1957     // level to get this sequence of components."
1958 
1959     if (type.isSizedArray()) {
1960         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1961         // Unsized array use to xfb should be a compile error.
1962         TType elementType(type, 0);
1963         return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains16BitType, contains16BitType);
1964     }
1965 
1966     if (type.isStruct()) {
1967         unsigned int size = 0;
1968         bool structContains64BitType = false;
1969         bool structContains32BitType = false;
1970         bool structContains16BitType = false;
1971         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1972             TType memberType(type, member);
1973             // "... if applied to
1974             // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
1975             // and the space taken in the buffer will be a multiple of 8."
1976             bool memberContains64BitType = false;
1977             bool memberContains32BitType = false;
1978             bool memberContains16BitType = false;
1979             int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
1980             if (memberContains64BitType) {
1981                 structContains64BitType = true;
1982                 RoundToPow2(size, 8);
1983             } else if (memberContains32BitType) {
1984                 structContains32BitType = true;
1985                 RoundToPow2(size, 4);
1986             } else if (memberContains16BitType) {
1987                 structContains16BitType = true;
1988                 RoundToPow2(size, 2);
1989             }
1990             size += memberSize;
1991         }
1992 
1993         if (structContains64BitType) {
1994             contains64BitType = true;
1995             RoundToPow2(size, 8);
1996         } else if (structContains32BitType) {
1997             contains32BitType = true;
1998             RoundToPow2(size, 4);
1999         } else if (structContains16BitType) {
2000             contains16BitType = true;
2001             RoundToPow2(size, 2);
2002         }
2003         return size;
2004     }
2005 
2006     int numComponents {0};
2007     if (type.isScalar())
2008         numComponents = 1;
2009     else if (type.isVector())
2010         numComponents = type.getVectorSize();
2011     else if (type.isMatrix())
2012         numComponents = type.getMatrixCols() * type.getMatrixRows();
2013     else {
2014         assert(0);
2015         numComponents = 1;
2016     }
2017 
2018     if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
2019         contains64BitType = true;
2020         return 8 * numComponents;
2021     } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
2022         contains16BitType = true;
2023         return 2 * numComponents;
2024     } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
2025         return numComponents;
2026     else {
2027         contains32BitType = true;
2028         return 4 * numComponents;
2029     }
2030 }
2031 
2032 const int baseAlignmentVec4Std140 = 16;
2033 
2034 // Return the size and alignment of a component of the given type.
2035 // The size is returned in the 'size' parameter
2036 // Return value is the alignment..
getBaseAlignmentScalar(const TType & type,int & size)2037 int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
2038 {
2039     switch (type.getBasicType()) {
2040     case EbtInt64:
2041     case EbtUint64:
2042     case EbtDouble:  size = 8; return 8;
2043     case EbtFloat16: size = 2; return 2;
2044     case EbtInt8:
2045     case EbtUint8:   size = 1; return 1;
2046     case EbtInt16:
2047     case EbtUint16:  size = 2; return 2;
2048     case EbtReference: size = 8; return 8;
2049     case EbtSampler:
2050     {
2051         if (type.isBindlessImage() || type.isBindlessTexture()) {
2052             size = 8; return 8;
2053         }
2054         else {
2055             size = 4; return 4;
2056         }
2057     }
2058     default:         size = 4; return 4;
2059     }
2060 }
2061 
// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
// Operates recursively.
//
// If std140 is true, it does the rounding up to vec4 size required by std140,
// otherwise it does not, yielding std430 rules.
//
// The size is returned in the 'size' parameter
//
// The stride is only non-0 for arrays or matrices, and is the stride of the
// top-level object nested within the type.  E.g., for an array of matrices,
// it is the distances needed between matrices, despite the rules saying the
// stride comes from the flattening down to vectors.
//
// Return value is the alignment of the type.
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
    int alignment;

    bool std140 = layoutPacking == glslang::ElpStd140;
    // When using the std140 storage layout, structures will be laid out in buffer
    // storage with its members stored in monotonically increasing order based on their
    // location in the declaration. A structure and each structure member have a base
    // offset and a base alignment, from which an aligned offset is computed by rounding
    // the base offset up to a multiple of the base alignment. The base offset of the first
    // member of a structure is taken from the aligned offset of the structure itself. The
    // base offset of all other structure members is derived by taking the offset of the
    // last basic machine unit consumed by the previous member and adding one. Each
    // structure member is stored in memory at its aligned offset. The members of a top-
    // level uniform block are laid out in buffer storage by treating the uniform block as
    // a structure with a base offset of zero.
    //
    //   1. If the member is a scalar consuming N basic machine units, the base alignment is N.
    //
    //   2. If the member is a two- or four-component vector with components consuming N basic
    //      machine units, the base alignment is 2N or 4N, respectively.
    //
    //   3. If the member is a three-component vector with components consuming N
    //      basic machine units, the base alignment is 4N.
    //
    //   4. If the member is an array of scalars or vectors, the base alignment and array
    //      stride are set to match the base alignment of a single array element, according
    //      to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
    //      array may have padding at the end; the base offset of the member following
    //      the array is rounded up to the next multiple of the base alignment.
    //
    //   5. If the member is a column-major matrix with C columns and R rows, the
    //      matrix is stored identically to an array of C column vectors with R
    //      components each, according to rule (4).
    //
    //   6. If the member is an array of S column-major matrices with C columns and
    //      R rows, the matrix is stored identically to a row of S X C column vectors
    //      with R components each, according to rule (4).
    //
    //   7. If the member is a row-major matrix with C columns and R rows, the matrix
    //      is stored identically to an array of R row vectors with C components each,
    //      according to rule (4).
    //
    //   8. If the member is an array of S row-major matrices with C columns and R
    //      rows, the matrix is stored identically to a row of S X R row vectors with C
    //      components each, according to rule (4).
    //
    //   9. If the member is a structure, the base alignment of the structure is N , where
    //      N is the largest base alignment value of any    of its members, and rounded
    //      up to the base alignment of a vec4. The individual members of this substructure
    //      are then assigned offsets by applying this set of rules recursively,
    //      where the base offset of the first member of the sub-structure is equal to the
    //      aligned offset of the structure. The structure may have padding at the end;
    //      the base offset of the member following the sub-structure is rounded up to
    //      the next multiple of the base alignment of the structure.
    //
    //   10. If the member is an array of S structures, the S elements of the array are laid
    //       out in order, according to rule (9).
    //
    //   Assuming, for rule 10:  The stride is the same as the size of an element.

    stride = 0;
    int dummyStride;    // receiver for recursive calls whose stride is not needed

    // rules 4, 6, 8, and 10
    if (type.isArray()) {
        // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType derefType(type, 0);
        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        // round the element size up to the alignment to form the array stride
        RoundToPow2(size, alignment);
        stride = size;  // uses full matrix size for stride of an array of matrices (not quite what rule 6/8, but what's expected)
                        // uses the assumption for rule 10 in the comment above
        // use one element to represent the last member of SSBO which is unsized array
        int arraySize = (type.isUnsizedArray() && (type.getOuterArraySize() == 0)) ? 1 : type.getOuterArraySize();
        size = stride * arraySize;
        return alignment;
    }

    // rule 9
    if (type.getBasicType() == EbtStruct || type.getBasicType() == EbtBlock) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        // std140 raises the floor to a vec4's alignment
        int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
                                                   (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            // align this member's offset, then account for its size
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        // The structure may have padding at the end; the base offset of
        // the member following the sub-structure is rounded up to the next
        // multiple of the base alignment of the structure.
        RoundToPow2(size, maxAlignment);

        return maxAlignment;
    }

    // rule 1
    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    // rules 2 and 3
    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);
        switch (type.getVectorSize()) {
        case 1: // HLSL has this, GLSL does not
            return scalarAlign;
        case 2:
            size *= 2;
            return 2 * scalarAlign;
        default:
            // vec3 aligns (and, for size purposes here, is counted) like vec3/vec4
            size *= type.getVectorSize();
            return 4 * scalarAlign;
        }
    }

    // rules 5 and 7
    if (type.isMatrix()) {
        // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
        TType derefType(type, 0, rowMajor);

        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // use intra-matrix stride for stride of a just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = baseAlignmentVec4Std140;
    return baseAlignmentVec4Std140;
}
2222 
2223 // To aid the basic HLSL rule about crossing vec4 boundaries.
improperStraddle(const TType & type,int size,int offset,bool vectorLike)2224 bool TIntermediate::improperStraddle(const TType& type, int size, int offset, bool vectorLike)
2225 {
2226     if (! vectorLike || type.isArray())
2227         return false;
2228 
2229     return size <= 16 ? offset / 16 != (offset + size - 1) / 16
2230                       : offset % 16 != 0;
2231 }
2232 
getScalarAlignment(const TType & type,int & size,int & stride,bool rowMajor)2233 int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
2234 {
2235     int alignment;
2236 
2237     stride = 0;
2238     int dummyStride;
2239 
2240     if (type.isArray()) {
2241         TType derefType(type, 0);
2242         alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
2243 
2244         stride = size;
2245         RoundToPow2(stride, alignment);
2246 
2247         size = stride * (type.getOuterArraySize() - 1) + size;
2248         return alignment;
2249     }
2250 
2251     if (type.getBasicType() == EbtStruct) {
2252         const TTypeList& memberList = *type.getStruct();
2253 
2254         size = 0;
2255         int maxAlignment = 0;
2256         for (size_t m = 0; m < memberList.size(); ++m) {
2257             int memberSize;
2258             // modify just the children's view of matrix layout, if there is one for this member
2259             TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
2260             int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
2261                                                      (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
2262             maxAlignment = std::max(maxAlignment, memberAlignment);
2263             RoundToPow2(size, memberAlignment);
2264             size += memberSize;
2265         }
2266 
2267         return maxAlignment;
2268     }
2269 
2270     if (type.isScalar())
2271         return getBaseAlignmentScalar(type, size);
2272 
2273     if (type.isVector()) {
2274         int scalarAlign = getBaseAlignmentScalar(type, size);
2275 
2276         size *= type.getVectorSize();
2277         return scalarAlign;
2278     }
2279 
2280     if (type.isMatrix()) {
2281         TType derefType(type, 0, rowMajor);
2282 
2283         alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
2284 
2285         stride = size;  // use intra-matrix stride for stride of a just a matrix
2286         if (rowMajor)
2287             size = stride * type.getMatrixRows();
2288         else
2289             size = stride * type.getMatrixCols();
2290 
2291         return alignment;
2292     }
2293 
2294     assert(0);  // all cases should be covered above
2295     size = 1;
2296     return 1;
2297 }
2298 
getMemberAlignment(const TType & type,int & size,int & stride,TLayoutPacking layoutPacking,bool rowMajor)2299 int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
2300 {
2301     if (layoutPacking == glslang::ElpScalar) {
2302         return getScalarAlignment(type, size, stride, rowMajor);
2303     } else {
2304         return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
2305     }
2306 }
2307 
2308 // shared calculation by getOffset and getOffsets
updateOffset(const TType & parentType,const TType & memberType,int & offset,int & memberSize)2309 void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
2310 {
2311     int dummyStride;
2312 
2313     // modify just the children's view of matrix layout, if there is one for this member
2314     TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
2315     int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
2316                                              parentType.getQualifier().layoutPacking,
2317                                              subMatrixLayout != ElmNone
2318                                                  ? subMatrixLayout == ElmRowMajor
2319                                                  : parentType.getQualifier().layoutMatrix == ElmRowMajor);
2320     RoundToPow2(offset, memberAlignment);
2321 }
2322 
2323 // Lookup or calculate the offset of a block member, using the recursively
2324 // defined block offset rules.
getOffset(const TType & type,int index)2325 int TIntermediate::getOffset(const TType& type, int index)
2326 {
2327     const TTypeList& memberList = *type.getStruct();
2328 
2329     // Don't calculate offset if one is present, it could be user supplied
2330     // and different than what would be calculated.  That is, this is faster,
2331     // but not just an optimization.
2332     if (memberList[index].type->getQualifier().hasOffset())
2333         return memberList[index].type->getQualifier().layoutOffset;
2334 
2335     int memberSize = 0;
2336     int offset = 0;
2337     for (int m = 0; m <= index; ++m) {
2338         updateOffset(type, *memberList[m].type, offset, memberSize);
2339 
2340         if (m < index)
2341             offset += memberSize;
2342     }
2343 
2344     return offset;
2345 }
2346 
2347 // Calculate the block data size.
2348 // Block arrayness is not taken into account, each element is backed by a separate buffer.
getBlockSize(const TType & blockType)2349 int TIntermediate::getBlockSize(const TType& blockType)
2350 {
2351     const TTypeList& memberList = *blockType.getStruct();
2352     int lastIndex = (int)memberList.size() - 1;
2353     int lastOffset = getOffset(blockType, lastIndex);
2354 
2355     int lastMemberSize;
2356     int dummyStride;
2357     getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
2358                        blockType.getQualifier().layoutPacking,
2359                        blockType.getQualifier().layoutMatrix == ElmRowMajor);
2360 
2361     return lastOffset + lastMemberSize;
2362 }
2363 
computeBufferReferenceTypeSize(const TType & type)2364 int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
2365 {
2366     assert(type.isReference());
2367     int size = getBlockSize(*type.getReferentType());
2368 
2369     int align = type.getBufferReferenceAlignment();
2370 
2371     if (align) {
2372         size = (size + align - 1) & ~(align-1);
2373     }
2374 
2375     return size;
2376 }
2377 
isIoResizeArray(const TType & type,EShLanguage language)2378 bool TIntermediate::isIoResizeArray(const TType& type, EShLanguage language) {
2379     return type.isArray() &&
2380             ((language == EShLangGeometry    && type.getQualifier().storage == EvqVaryingIn) ||
2381             (language == EShLangTessControl && (type.getQualifier().storage == EvqVaryingIn || type.getQualifier().storage == EvqVaryingOut) &&
2382                 ! type.getQualifier().patch) ||
2383             (language == EShLangTessEvaluation && type.getQualifier().storage == EvqVaryingIn) ||
2384             (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn &&
2385              (type.getQualifier().pervertexNV || type.getQualifier().pervertexEXT)) ||
2386             (language == EShLangMesh && type.getQualifier().storage == EvqVaryingOut &&
2387                 !type.getQualifier().perTaskNV));
2388 }
2389 
2390 } // end namespace glslang
2391