//
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#include "../Include/Common.h"
#include "reflection.h"
#include "LiveTraverser.h"
#include "localintermediate.h"

#include "gl_types.h"

//
// Grow the reflection database through a friend traverser class of TReflection and a
// collection of functions to do a liveness traversal that notes which uniforms are used
// in semantically non-dead code.
//
// Can be used multiple times, once per stage, to grow a program reflection.
//
// High-level algorithm for one stage:
//
// 1. Put the entry point on the list of live functions.
//
// 2. Traverse any live function, while skipping if-tests with a compile-time constant
//    condition of false, and while adding any encountered function calls to the live
//    function list.
//
//    Repeat until the live function list is empty.
//
// 3. Add any encountered uniform variables and blocks to the reflection database.
//
// Can be attempted even after a failed link, but will return false if recursion was
// detected or there wasn't exactly one entry point.
//
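// A minimal usage sketch (hypothetical driver code; the stage choices, option flags, and
// the source of the TIntermediate objects below are assumptions, not part of this file):
//
//     glslang::TReflection reflection(EShReflectionDefault, EShLangVertex, EShLangFragment);
//     reflection.addStage(EShLangVertex,   *vertexIntermediate);   // once per stage
//     reflection.addStage(EShLangFragment, *fragmentIntermediate);
//     reflection.dump();                                           // print the gathered database
//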

namespace glslang {

//
// The traverser: mostly pass through, except
//  - processing binary nodes to see if they are dereferences of aggregates to track
//  - processing symbol nodes to see if they are non-aggregate objects to track
//
// This ignores semantically dead code by using TLiveTraverser.
//
// This is in the glslang namespace directly so it can be a friend of TReflection.
//

class TReflectionTraverser : public TIntermTraverser {
public:
    TReflectionTraverser(const TIntermediate& i, TReflection& r) :
        TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }

    virtual bool visitBinary(TVisit, TIntermBinary* node);
    virtual void visitSymbol(TIntermSymbol* base);

    // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
    // However, no dereference doesn't mean simple... it could be a complex aggregate.
    void addUniform(const TIntermSymbol& base)
    {
        if (processedDerefs.find(&base) == processedDerefs.end()) {
            processedDerefs.insert(&base);

            int blockIndex = -1;
            int offset = -1;
            TList<TIntermBinary*> derefs;
            TString baseName = base.getName();

            if (base.getType().getBasicType() == EbtBlock) {
                offset = 0;
                bool anonymous = IsAnonymous(baseName);
                const TString& blockName = base.getType().getTypeName();

                if (!anonymous)
                    baseName = blockName;
                else
                    baseName = "";

                blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
            }

            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
            // the dereference chain expected by blowUpActiveAggregate.
            blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, -1, 0,
                                  base.getQualifier().storage, updateStageMasks);
        }
    }

    void addPipeIOVariable(const TIntermSymbol& base)
    {
        if (processedDerefs.find(&base) == processedDerefs.end()) {
            processedDerefs.insert(&base);

            const TString &name = base.getName();
            const TType &type = base.getType();
            const bool input = base.getQualifier().isPipeInput();

            TReflection::TMapIndexToReflection &ioItems =
                input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;

            TReflection::TNameToIndex &ioMapper =
                input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;

            if (reflection.options & EShReflectionUnwrapIOBlocks) {
                bool anonymous = IsAnonymous(name);

                TString baseName;
                if (type.getBasicType() == EbtBlock) {
                    baseName = anonymous ? TString() : type.getTypeName();
                } else {
                    baseName = anonymous ? TString() : name;
                }

                // by convention, if this is an arrayed block we ignore the array in the reflection
                if (type.isArray() && type.getBasicType() == EbtBlock) {
                    blowUpIOAggregate(input, baseName, TType(type, 0));
                } else {
                    blowUpIOAggregate(input, baseName, type);
                }
            } else {
                TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
                if (it == ioMapper.end()) {
                    // Separate pipe I/O parameters from uniforms and blocks.
                    // 'in' variables are reflected only for the first stage, and 'out' variables
                    // only for the last stage; see the traversal in the call stack.
                    ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
                    ioItems.push_back(
                        TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
                    EShLanguageMask& stages = ioItems.back().stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                } else {
                    EShLanguageMask& stages = ioItems[it->second].stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                }
            }
        }
    }

    // Lookup or calculate the offset of all block members at once, using the recursively
    // defined block offset rules.
    void getOffsets(const TType& type, TVector<int>& offsets)
    {
        const TTypeList& memberList = *type.getStruct();
        int memberSize = 0;
        int offset = 0;

        for (size_t m = 0; m < offsets.size(); ++m) {
            // if the user supplied an offset, snap to it now
            if (memberList[m].type->getQualifier().hasOffset())
                offset = memberList[m].type->getQualifier().layoutOffset;

            // calculate the offset of the next member and align the current offset to this member
            intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);

            // save the offset of this member
            offsets[m] = offset;

            // update for the next member
            offset += memberSize;
        }
    }
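
    // For intuition, a worked example (a sketch under std140 layout rules; the block below
    // is illustrative and not declared anywhere in this file):
    //
    //     layout(std140) uniform U { float a; vec3 b; float c; };
    //
    // getOffsets fills in { 0, 16, 28 }: 'a' sits at 0, 'b' is aligned up to a vec4
    // boundary (16), and 'c' follows the 12 bytes of 'b' at offset 28.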

    // Calculate the stride of an array type
    int getArrayStride(const TType& baseType, const TType& type)
    {
        int dummySize;
        int stride;

        // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
        if (type.getBasicType() == EbtBlock)
            return 0;

        TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
        intermediate.getMemberAlignment(type, dummySize, stride,
                                        baseType.getQualifier().layoutPacking,
                                        subMatrixLayout != ElmNone
                                            ? subMatrixLayout == ElmRowMajor
                                            : baseType.getQualifier().layoutMatrix == ElmRowMajor);

        return stride;
    }
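
    // Again as a sketch under assumed packing rules: for 'float x[4]' inside a std140
    // block, the returned stride is 16, since std140 rounds array element strides up to a
    // vec4 boundary; under std430 the same array would have a stride of 4.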

    // count the total number of leaf members when iterating through a block type
    int countAggregateMembers(const TType& parentType)
    {
        if (! parentType.isStruct())
            return 1;

        const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);

        bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);

        const TTypeList &memberList = *parentType.getStruct();

        int ret = 0;

        for (size_t i = 0; i < memberList.size(); i++)
        {
            const TType &memberType = *memberList[i].type;
            int numMembers = countAggregateMembers(memberType);
            // for sized arrays of structs, apply logic to expand out the same as we would below in
            // blowUpActiveAggregate
            if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
                if (! strictArraySuffix || ! blockParent)
                    numMembers *= memberType.getArraySizes()->getCumulativeSize();
            }
            ret += numMembers;
        }

        return ret;
    }
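
    // For example (a sketch; the declarations below are hypothetical), given
    //
    //     struct S { vec3 p; vec3 q; };
    //     uniform B { S s[2]; float f; };
    //
    // countAggregateMembers on B's type returns 5: the two leaves of S, counted twice for
    // the sized array s[2], plus the leaf f. (Under strict array-suffix rules inside a
    // buffer block, the array would not be expanded and the count would be 3.)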

    // Traverse the provided deref chain, including the base, and
    // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
    // - recursively expand any variable array index in the middle of that traversal
    // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
    //
    // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
    // A value of 0 for arraySize will mean to use the full array's size.
    void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
                               TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
                               int topLevelArraySize, int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
    {
        // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
        // Broadly:
        // * arrays-of-structs always have a [x] suffix.
        // * with array-of-struct variables in the root of a buffer block, only ever return [0].
        // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
        const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);

        // Is this variable inside a buffer block? This flag is set back to false after we iterate inside the first array element.
        bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);

        // process the part of the dereference chain that was explicit in the shader
        TString name = baseName;
        const TType* terminalType = &baseType;
        for (; deref != derefs.end(); ++deref) {
            TIntermBinary* visitNode = *deref;
            terminalType = &visitNode->getType();
            int index;
            switch (visitNode->getOp()) {
            case EOpIndexIndirect: {
                int stride = getArrayStride(baseType, visitNode->getLeft()->getType());

                if (topLevelArrayStride == 0)
                    topLevelArrayStride = stride;

                // Visit all the indices of this array, and for each one add on the remaining dereferencing
                for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
                    TString newBaseName = name;
                    if (terminalType->getBasicType() == EbtBlock) {}
                    else if (strictArraySuffix && blockParent)
                        newBaseName.append(TString("[0]"));
                    else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TList<TIntermBinary*>::const_iterator nextDeref = deref;
                    ++nextDeref;
                    blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
                                          topLevelArraySize, topLevelArrayStride, baseStorage, active);

                    if (offset >= 0)
                        offset += stride;
                }

                // it was all completed in the recursive calls above
                return;
            }
            case EOpIndexDirect: {
                int stride = getArrayStride(baseType, visitNode->getLeft()->getType());

                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (terminalType->getBasicType() == EbtBlock) {}
                else if (strictArraySuffix && blockParent)
                    name.append(TString("[0]"));
                else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
                    name.append(TString("[") + String(index) + "]");

                    if (offset >= 0)
                        offset += stride * index;
                }

                if (topLevelArrayStride == 0)
                    topLevelArrayStride = stride;

                // expand top-level arrays in blocks with [0] suffix
                if (topLevelArrayStride != 0 && visitNode->getLeft()->getType().isArray()) {
                    blockParent = false;
                }
                break;
            }
            case EOpIndexDirectStruct:
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (offset >= 0)
                    offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
                if (name.size() > 0)
                    name.append(".");
                name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());

                // expand non top-level arrays with [x] suffix
                if (visitNode->getLeft()->getType().getBasicType() != EbtBlock && terminalType->isArray())
                {
                    blockParent = false;
                }
                break;
            default:
                break;
            }
        }

        // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
        if (! isReflectionGranularity(*terminalType)) {
            // the base offset of this node, that children are relative to
            int baseOffset = offset;

            if (terminalType->isArray()) {
                // Visit all the indices of this array, and for each one,
                // fully explode the remaining aggregate to dereference

                int stride = 0;
                if (offset >= 0)
                    stride = getArrayStride(baseType, *terminalType);

                int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);

                // for top-level arrays in blocks, only expand [0] to avoid explosion of items
                if ((strictArraySuffix && blockParent) ||
                    ((topLevelArraySize == arrayIterateSize) && (topLevelArrayStride == 0))) {
                    arrayIterateSize = 1;
                }

                if (topLevelArrayStride == 0)
                    topLevelArrayStride = stride;

                for (int i = 0; i < arrayIterateSize; ++i) {
                    TString newBaseName = name;
                    if (terminalType->getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TType derefType(*terminalType, 0);
                    if (offset >= 0)
                        offset = baseOffset + stride * i;

                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
                                          topLevelArraySize, topLevelArrayStride, baseStorage, active);
                }
            } else {
                // Visit all members of this aggregate, and for each one,
                // fully explode the remaining aggregate to dereference
                const TTypeList& typeList = *terminalType->getStruct();

                TVector<int> memberOffsets;

                if (baseOffset >= 0) {
                    memberOffsets.resize(typeList.size());
                    getOffsets(*terminalType, memberOffsets);
                }

                for (int i = 0; i < (int)typeList.size(); ++i) {
                    TString newBaseName = name;
                    if (newBaseName.size() > 0)
                        newBaseName.append(".");
                    newBaseName.append(typeList[i].type->getFieldName());
                    TType derefType(*terminalType, i);
                    if (offset >= 0)
                        offset = baseOffset + memberOffsets[i];

                    int arrayStride = topLevelArrayStride;
                    if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
                        derefType.isArray()) {
                        arrayStride = getArrayStride(baseType, derefType);
                    }

                    if (topLevelArraySize == -1 && arrayStride == 0 && blockParent)
                        topLevelArraySize = 1;

                    if (strictArraySuffix && blockParent) {
                        // if this member is an array, store the top-level array stride but start the explosion from
                        // the inner struct type.
                        if (derefType.isArray() && derefType.isStruct()) {
                            newBaseName.append("[0]");
                            auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
                            blowUpActiveAggregate(TType(derefType, 0), newBaseName, derefs, derefs.end(), memberOffsets[i],
                                                  blockIndex, 0, dimSize, arrayStride, terminalType->getQualifier().storage, false);
                        }
                        else if (derefType.isArray()) {
                            auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
                            blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
                                                  0, dimSize, 0, terminalType->getQualifier().storage, false);
                        }
                        else {
                            blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
                                                  0, 1, 0, terminalType->getQualifier().storage, false);
                        }
                    } else {
                        blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
                                              topLevelArraySize, arrayStride, baseStorage, active);
                    }
                }
            }

            // it was all completed in the recursive calls above
            return;
        }

        if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
            name.append(TString("[0]"));
        }

        // Finally, add a full string to the reflection database, and update the array size if necessary.
        // If the dereferenced entity to record is an array, compute the size and update the maximum size.

        // there might not be a final array dereference, it could have been copied as an array object
        if (arraySize == 0)
            arraySize = mapToGlArraySize(*terminalType);

        TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);

        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
        if (it == reflection.nameToIndex.end()) {
            int uniformIndex = (int)variables.size();
            reflection.nameToIndex[name.c_str()] = uniformIndex;
            variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
                                                  arraySize, blockIndex));
            if (terminalType->isArray()) {
                variables.back().arrayStride = getArrayStride(baseType, *terminalType);
                if (topLevelArrayStride == 0)
                    topLevelArrayStride = variables.back().arrayStride;
            }

            if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
                reflection.atomicCounterUniformIndices.push_back(uniformIndex);

            variables.back().topLevelArraySize = topLevelArraySize;
            variables.back().topLevelArrayStride = topLevelArrayStride;

            if ((reflection.options & EShReflectionAllBlockVariables) && active) {
                EShLanguageMask& stages = variables.back().stages;
                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
            }
        } else {
            if (arraySize > 1) {
                int& reflectedArraySize = variables[it->second].size;
                reflectedArraySize = std::max(arraySize, reflectedArraySize);
            }

            if ((reflection.options & EShReflectionAllBlockVariables) && active) {
                EShLanguageMask& stages = variables[it->second].stages;
                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
            }
        }
    }
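
    // To make the naming concrete (a sketch; the block below is hypothetical): given
    // 'struct S { float f; };' and the anonymous block 'uniform B { S s[2]; };', exploding
    // the base with an empty deref chain yields entries named "s[0].f" and "s[1].f"
    // (or only "s[0].f" under strict array-suffix rules inside a buffer block), each with
    // its layout-derived offset.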

    // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
    void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
    {
        TString name = baseName;

        // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
        if (! isReflectionGranularity(type)) {
            if (type.isArray()) {
                // Visit all the indices of this array, and for each one,
                // fully explode the remaining aggregate to dereference
                for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
                    TString newBaseName = name;
                    newBaseName.append(TString("[") + String(i) + "]");
                    TType derefType(type, 0);

                    blowUpIOAggregate(input, newBaseName, derefType);
                }
            } else {
                // Visit all members of this aggregate, and for each one,
                // fully explode the remaining aggregate to dereference
                const TTypeList& typeList = *type.getStruct();

                for (int i = 0; i < (int)typeList.size(); ++i) {
                    TString newBaseName = name;
                    if (newBaseName.size() > 0)
                        newBaseName.append(".");
                    newBaseName.append(typeList[i].type->getFieldName());
                    TType derefType(type, i);

                    blowUpIOAggregate(input, newBaseName, derefType);
                }
            }

            // it was all completed in the recursive calls above
            return;
        }

        if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
            name.append(TString("[0]"));
        }

        TReflection::TMapIndexToReflection &ioItems =
            input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;

        std::string namespacedName = input ? "in " : "out ";
        namespacedName += name.c_str();

        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
        if (it == reflection.nameToIndex.end()) {
            reflection.nameToIndex[namespacedName] = (int)ioItems.size();
            ioItems.push_back(
                TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));

            EShLanguageMask& stages = ioItems.back().stages;
            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
        } else {
            EShLanguageMask& stages = ioItems[it->second].stages;
            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
        }
    }
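
    // Note the "in "/"out " prefix used for the lookup key above: it keeps an input and an
    // output of the same name distinct in nameToIndex. For example, a vertex input
    // 'vec3 normal' is keyed as "in normal", while the reflected item itself still reports
    // the plain name "normal".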

    // Add a uniform dereference where blocks/struct/arrays are involved in the access.
    // Handles the situation where the left node is at the correct or too coarse a
    // granularity for reflection. (That is, further dereferences up the tree will be
    // skipped.) Earlier dereferences, down the tree, will be handled
    // at the same time, and logged to prevent reprocessing as the tree is traversed.
    //
    // Note: Other things like the following must be caught elsewhere:
    //  - a simple non-array, non-struct variable (no dereference even conceivable)
    //  - an aggregate consumed en masse, without a dereference
    //
    // So, this code is for cases like
    //  - a struct/block dereferencing a member (whether the member is array or not)
    //  - an array of struct
    //  - structs/arrays containing the above
    //
    void addDereferencedUniform(TIntermBinary* topNode)
    {
        // See if too fine-grained to process (wait to get further down the tree)
        const TType& leftType = topNode->getLeft()->getType();
        if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
            return;

        // We have an array or structure or block dereference, see if it's a uniform-based
        // dereference (if not, skip it).
        TIntermSymbol* base = findBase(topNode);
        if (! base || ! base->getQualifier().isUniformOrBuffer())
            return;

        // See if we've already processed this (e.g., in the middle of something
        // we did earlier), and if so skip it
        if (processedDerefs.find(topNode) != processedDerefs.end())
            return;

        // Process this uniform dereference

        int offset = -1;
        int blockIndex = -1;
        bool anonymous = false;

        // See if we need to record the block itself
        bool block = base->getBasicType() == EbtBlock;
        if (block) {
            offset = 0;
            anonymous = IsAnonymous(base->getName());

            const TString& blockName = base->getType().getTypeName();
            TString baseName;

            if (! anonymous)
                baseName = blockName;

            blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));

            if (reflection.options & EShReflectionAllBlockVariables) {
                // Use a degenerate (empty) set of dereferences to immediately put us at the end of
                // the dereference chain expected by blowUpActiveAggregate.
                TList<TIntermBinary*> derefs;

                // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
                // expanding root arrays anyway, just start the iteration from the base block type.
                blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, -1, 0,
                                      base->getQualifier().storage, false);
            }
        }

        // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
        // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
        TList<TIntermBinary*> derefs;
        for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
            if (isReflectionGranularity(visitNode->getLeft()->getType()))
                continue;

            derefs.push_front(visitNode);
            processedDerefs.insert(visitNode);
        }
        processedDerefs.insert(base);

        // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
        int arraySize = 0;
        if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
            if (topNode->getOp() == EOpIndexDirect)
                arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
        }

        // Put the dereference chain together, forward
        TString baseName;
        if (! anonymous) {
            if (block)
                baseName = base->getType().getTypeName();
            else
                baseName = base->getName();
        }
        blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, -1, 0,
                              base->getQualifier().storage, true);
    }

    int addBlockName(const TString& name, const TType& type, int size)
    {
        int blockIndex = 0;
        if (type.isArray()) {
            TType derefType(type, 0);
            for (int e = 0; e < type.getOuterArraySize(); ++e) {
                int memberBlockIndex = addBlockName(name + "[" + String(e) + "]", derefType, size);
                if (e == 0)
                    blockIndex = memberBlockIndex;
            }
        } else {
            TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);

            TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
            if (it == reflection.nameToIndex.end()) {
                blockIndex = (int)blocks.size();
                reflection.nameToIndex[name.c_str()] = blockIndex;
                blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, blockIndex));

                blocks.back().numMembers = countAggregateMembers(type);

                if (updateStageMasks) {
                    EShLanguageMask& stages = blocks.back().stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                }
            } else {
                blockIndex = it->second;
                if (updateStageMasks) {
                    EShLanguageMask& stages = blocks[blockIndex].stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                }
            }
        }

        return blockIndex;
    }

    // Are we at a level in a dereference chain at which individual active uniform queries are made?
    bool isReflectionGranularity(const TType& type)
    {
        return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
    }

    // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
    // Return nullptr if the topology does not fit this situation.
    TIntermSymbol* findBase(const TIntermBinary* node)
    {
        TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
        if (base)
            return base;
        TIntermBinary* left = node->getLeft()->getAsBinaryNode();
        if (! left)
            return nullptr;

        return findBase(left);
    }

    //
    // Translate a glslang sampler type into the GL API #define number.
    //
    int mapSamplerToGlType(TSampler sampler)
    {
        if (! sampler.image) {
            // a sampler...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
                    case true:  return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
                    }
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:
                        switch ((int)sampler.shadow) {
                        case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
                        case true:  return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
                        }
                    case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_SAMPLER_3D;
                case EsdCube:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
                    case true:  return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
                    }
                case EsdRect:
                    return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_SAMPLER_BUFFER;
                }
            case EbtFloat16:
                switch ((int)sampler.dim) {
                case Esd1D:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
                    case true:  return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD;
                    }
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:
                        switch ((int)sampler.shadow) {
                        case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD;
                        case true:  return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD;
                        }
                    case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD;
                    }
                case Esd3D:
                    return GL_FLOAT16_SAMPLER_3D_AMD;
                case EsdCube:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD;
                    case true:  return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD;
                    }
                case EsdRect:
                    return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD;
                case EsdBuffer:
                    return GL_FLOAT16_SAMPLER_BUFFER_AMD;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
                    case true:  return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
                                                       : GL_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_INT_SAMPLER_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
                    case true:  return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
                                                       : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_SAMPLER_BUFFER;
                }
            default:
                return 0;
            }
        } else {
            // an image...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
                    case true:  return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
                case EsdRect:
                    return GL_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_IMAGE_BUFFER;
                }
            case EbtFloat16:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD;
                    case true:  return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD;
                    }
                case Esd3D:
                    return GL_FLOAT16_IMAGE_3D_AMD;
                case EsdCube:
                    return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD;
                case EsdRect:
                    return GL_FLOAT16_IMAGE_2D_RECT_AMD;
                case EsdBuffer:
                    return GL_FLOAT16_IMAGE_BUFFER_AMD;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
                    case true:  return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_INT_IMAGE_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
                    case true:  return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
                                                       : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_IMAGE_BUFFER;
                }
            default:
                return 0;
            }
        }
    }
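
    // As a sketch of the mapping (enum values come from gl_types.h / the GL headers):
    // a 'sampler2D' maps to GL_SAMPLER_2D, 'isampler2DArray' to GL_INT_SAMPLER_2D_ARRAY,
    // and 'image3D' to GL_IMAGE_3D; unknown or unsupported combinations return 0.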

    //
    // Translate a glslang type into the GL API #define number.
    // Ignores arrayness.
    //
    int mapToGlType(const TType& type)
    {
        switch (type.getBasicType()) {
        case EbtSampler:
            return mapSamplerToGlType(type.getSampler());
        case EbtStruct:
        case EbtBlock:
        case EbtVoid:
            return 0;
        default:
            break;
        }

        if (type.isVector()) {
            int offset = type.getVectorSize() - 2;
            switch (type.getBasicType()) {
            case EbtFloat:      return GL_FLOAT_VEC2                  + offset;
            case EbtDouble:     return GL_DOUBLE_VEC2                 + offset;
            case EbtFloat16:    return GL_FLOAT16_VEC2_NV             + offset;
            case EbtInt:        return GL_INT_VEC2                    + offset;
            case EbtUint:       return GL_UNSIGNED_INT_VEC2           + offset;
            case EbtInt64:      return GL_INT64_VEC2_ARB              + offset;
            case EbtUint64:     return GL_UNSIGNED_INT64_VEC2_ARB     + offset;
            case EbtBool:       return GL_BOOL_VEC2                   + offset;
            case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
            default:            return 0;
            }
        }
        if (type.isMatrix()) {
            switch (type.getBasicType()) {
            case EbtFloat:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT_MAT2;
                    case 3:  return GL_FLOAT_MAT2x3;
                    case 4:  return GL_FLOAT_MAT2x4;
                    default: return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT_MAT3x2;
                    case 3:  return GL_FLOAT_MAT3;
                    case 4:  return GL_FLOAT_MAT3x4;
                    default: return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT_MAT4x2;
                    case 3:  return GL_FLOAT_MAT4x3;
                    case 4:  return GL_FLOAT_MAT4;
                    default: return 0;
                    }
                }
            case EbtDouble:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_DOUBLE_MAT2;
                    case 3:  return GL_DOUBLE_MAT2x3;
                    case 4:  return GL_DOUBLE_MAT2x4;
                    default: return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_DOUBLE_MAT3x2;
                    case 3:  return GL_DOUBLE_MAT3;
                    case 4:  return GL_DOUBLE_MAT3x4;
                    default: return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_DOUBLE_MAT4x2;
                    case 3:  return GL_DOUBLE_MAT4x3;
                    case 4:  return GL_DOUBLE_MAT4;
                    default: return 0;
                    }
                }
            case EbtFloat16:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT16_MAT2_AMD;
                    case 3:  return GL_FLOAT16_MAT2x3_AMD;
                    case 4:  return GL_FLOAT16_MAT2x4_AMD;
                    default: return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT16_MAT3x2_AMD;
                    case 3:  return GL_FLOAT16_MAT3_AMD;
                    case 4:  return GL_FLOAT16_MAT3x4_AMD;
                    default: return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:  return GL_FLOAT16_MAT4x2_AMD;
                    case 3:  return GL_FLOAT16_MAT4x3_AMD;
                    case 4:  return GL_FLOAT16_MAT4_AMD;
                    default: return 0;
                    }
                }
            default:
                return 0;
            }
        }
        if (type.getVectorSize() == 1) {
            switch (type.getBasicType()) {
            case EbtFloat:      return GL_FLOAT;
            case EbtDouble:     return GL_DOUBLE;
            case EbtFloat16:    return GL_FLOAT16_NV;
            case EbtInt:        return GL_INT;
            case EbtUint:       return GL_UNSIGNED_INT;
            case EbtInt64:      return GL_INT64_ARB;
            case EbtUint64:     return GL_UNSIGNED_INT64_ARB;
            case EbtBool:       return GL_BOOL;
            case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
            default:            return 0;
            }
        }

        return 0;
    }
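
    // The vector cases rely on the GL enums being contiguous, e.g. GL_FLOAT_VEC2,
    // GL_FLOAT_VEC3, and GL_FLOAT_VEC4 are consecutive values, so a 'vec3' maps to
    // GL_FLOAT_VEC2 + 1 == GL_FLOAT_VEC3.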

    int mapToGlArraySize(const TType& type)
    {
        return type.isArray() ? type.getOuterArraySize() : 1;
    }

    const TIntermediate& intermediate;
    TReflection& reflection;
    std::set<const TIntermNode*> processedDerefs;
    bool updateStageMasks;

protected:
    TReflectionTraverser(TReflectionTraverser&);
    TReflectionTraverser& operator=(TReflectionTraverser&);
};

//
// Implement the traversal functions of interest.
//

// To catch dereferenced aggregates that must be reflected.
// This catches them at the highest level possible in the tree.
bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
{
    switch (node->getOp()) {
    case EOpIndexDirect:
    case EOpIndexIndirect:
    case EOpIndexDirectStruct:
        addDereferencedUniform(node);
        break;
    default:
        break;
    }

    // still need to visit everything below, which could contain sub-expressions
    // containing different uniforms
    return true;
}

// To reflect non-dereferenced objects.
void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
{
    if (base->getQualifier().storage == EvqUniform) {
        if (base->getBasicType() == EbtBlock) {
            if (reflection.options & EShReflectionSharedStd140UBO) {
                addUniform(*base);
            }
        } else {
            addUniform(*base);
        }
    }

    // TODO: add std140/layout active rules for SSBOs, same as for UBOs.
    // Storage buffer blocks are collected and expanded here.
    if ((reflection.options & EShReflectionSharedStd140SSBO) &&
        (base->getQualifier().storage == EvqBuffer && base->getBasicType() == EbtBlock &&
         (base->getQualifier().layoutPacking == ElpStd140 || base->getQualifier().layoutPacking == ElpShared)))
        addUniform(*base);

    if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
        (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
        addPipeIOVariable(*base);
}

//
// Implement TObjectReflection methods.
//

TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
                                     int pSize, int pIndex)
    : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
      numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
{
}

int TObjectReflection::getBinding() const
{
    if (type == nullptr || !type->getQualifier().hasBinding())
        return -1;
    return type->getQualifier().layoutBinding;
}

void TObjectReflection::dump() const
{
    printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
           index, getBinding(), stages);

    if (counterIndex != -1)
        printf(", counter %d", counterIndex);

    if (numMembers != -1)
        printf(", numMembers %d", numMembers);

    if (arrayStride != 0)
        printf(", arrayStride %d", arrayStride);

    if (topLevelArrayStride != 0)
        printf(", topLevelArrayStride %d", topLevelArrayStride);

    printf("\n");
}

//
// Implement TReflection methods.
//

// Track any required attribute reflection, such as compute shader numthreads.
//
void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
{
    if (stage == EShLangCompute) {
        // Remember thread dimensions
        for (int dim = 0; dim < 3; ++dim)
            localSize[dim] = intermediate.getLocalSize(dim);
    }
}

// build counter block index associations for buffers
void TReflection::buildCounterIndices(const TIntermediate& intermediate)
{
#ifdef ENABLE_HLSL
    // search for ones that have counters
    for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
        const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
        const int index = getIndex(counterName);

        if (index >= 0)
            indexToUniformBlock[i].counterIndex = index;
    }
#else
    (void)intermediate;
#endif
}

// build the shader stage mask for all uniforms
void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
{
    if (options & EShReflectionAllBlockVariables)
        return;

    for (int i = 0; i < int(indexToUniform.size()); ++i) {
        indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
    }

    for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
        indexToBufferVariable[i].stages =
            static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
    }
}

// Merge live symbols from 'intermediate' into the existing reflection database.
//
// Returns false if the input is too malformed to do this.
bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
{
    if (intermediate.getTreeRoot() == nullptr ||
        intermediate.getNumEntryPoints() != 1 ||
        intermediate.isRecursive())
        return false;

    buildAttributeReflection(stage, intermediate);

    TReflectionTraverser it(intermediate, *this);

    for (auto& sequence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
        if (sequence->getAsAggregate() != nullptr) {
            if (sequence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
                it.updateStageMasks = false;
                TIntermAggregate* linkerObjects = sequence->getAsAggregate();
                for (auto& linkerObject : linkerObjects->getSequence()) {
                    auto pNode = linkerObject->getAsSymbolNode();
                    if (pNode != nullptr) {
                        if ((pNode->getQualifier().storage == EvqUniform &&
                             (options & EShReflectionSharedStd140UBO)) ||
                            (pNode->getQualifier().storage == EvqBuffer &&
                             (options & EShReflectionSharedStd140SSBO))) {
                            // collect std140 and shared uniform blocks from the AST
                            if ((pNode->getBasicType() == EbtBlock) &&
                                ((pNode->getQualifier().layoutPacking == ElpStd140) ||
                                 (pNode->getQualifier().layoutPacking == ElpShared))) {
                                pNode->traverse(&it);
                            }
                        }
                        else if ((options & EShReflectionAllIOVariables) &&
                                 (pNode->getQualifier().isPipeInput() || pNode->getQualifier().isPipeOutput()))
                        {
                            pNode->traverse(&it);
                        }
                    }
                }
            } else {
                // This traverser visits every function in the AST.
                // To reflect uncalled functions, set the link message EShMsgKeepUncalled.
                // When EShMsgKeepUncalled is set, all functions are kept in the AST, even
                // uncalled ones, which in turn keeps in the reflection any uniform variables
                // referenced only by those uncalled functions.
                //
                // To reflect only live nodes, use the default link messages or leave
                // EShMsgKeepUncalled unset. The linker then drops uncalled functions from the
                // AST, so traversing all function nodes is equivalent to traversing only live
                // functions.
                it.updateStageMasks = true;
                sequence->getAsAggregate()->traverse(&it);
            }
        }
    }
    it.updateStageMasks = true;

    buildCounterIndices(intermediate);
    buildUniformStageMask(intermediate);

    return true;
}

void TReflection::dump()
{
    printf("Uniform reflection:\n");
    for (size_t i = 0; i < indexToUniform.size(); ++i)
        indexToUniform[i].dump();
    printf("\n");

    printf("Uniform block reflection:\n");
    for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
        indexToUniformBlock[i].dump();
    printf("\n");

    printf("Buffer variable reflection:\n");
    for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
        indexToBufferVariable[i].dump();
    printf("\n");

    printf("Buffer block reflection:\n");
    for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
        indexToBufferBlock[i].dump();
    printf("\n");

    printf("Pipeline input reflection:\n");
    for (size_t i = 0; i < indexToPipeInput.size(); ++i)
        indexToPipeInput[i].dump();
    printf("\n");

    printf("Pipeline output reflection:\n");
    for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
        indexToPipeOutput[i].dump();
    printf("\n");

    if (getLocalSize(0) > 1) {
        static const char* axis[] = { "X", "Y", "Z" };

        for (int dim = 0; dim < 3; ++dim)
            if (getLocalSize(dim) > 1)
                printf("Local size %s: %u\n", axis[dim], getLocalSize(dim));

        printf("\n");
    }

    // printf("Live names\n");
    // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
    //     printf("%s: %d\n", it->first.c_str(), it->second);
    // printf("\n");
}

} // end namespace glslang