1 //
2 // Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
3 // Copyright (C) 2012-2016 LunarG, Inc.
4 // Copyright (C) 2017 ARM Limited.
5 // Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
6 //
7 // All rights reserved.
8 //
9 // Redistribution and use in source and binary forms, with or without
10 // modification, are permitted provided that the following conditions
11 // are met:
12 //
13 // Redistributions of source code must retain the above copyright
14 // notice, this list of conditions and the following disclaimer.
15 //
16 // Redistributions in binary form must reproduce the above
17 // copyright notice, this list of conditions and the following
18 // disclaimer in the documentation and/or other materials provided
19 // with the distribution.
20 //
21 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
22 // contributors may be used to endorse or promote products derived
23 // from this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
33 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
35 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 // POSSIBILITY OF SUCH DAMAGE.
37 //
38
39 //
40 // Definition of the in-memory high-level intermediate representation
41 // of shaders. This is a tree that parser creates.
42 //
43 // Nodes in the tree are defined as a hierarchy of classes derived from
44 // TIntermNode. Each is a node in a tree. There is no preset branching factor;
// each node can have its own type of list of children.
46 //
47
48 #ifndef __INTERMEDIATE_H
49 #define __INTERMEDIATE_H
50
51 #if defined(_MSC_VER) && _MSC_VER >= 1900
52 #pragma warning(disable : 4464) // relative include path contains '..'
53 #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
54 #endif
55
56 #include "../Include/Common.h"
57 #include "../Include/Types.h"
58 #include "../Include/ConstantUnion.h"
59
60 namespace glslang {
61
62 class TIntermediate;
63
64 //
65 // Operators used by the high-level (parse tree) representation.
66 //
//
// Operators used by the high-level (parse tree) representation.
//
// NOTE: enumerator order is significant. Several "Guard" enumerators
// (e.g. EOpConstructGuardStart/EOpConstructGuardEnd, EOpImageGuardBegin/End,
// EOpTextureGuardBegin/End) delimit numeric ranges that consumers test with
// ordering comparisons, so do not insert, remove, or reorder values without
// auditing those range checks.
//
enum TOperator {
    EOpNull,            // if in a node, should only mean a node is still being built
    EOpSequence,        // denotes a list of statements, or parameters, etc.
    EOpLinkerObjects,   // for aggregate node of objects the linker may need, if not referenced by the rest of the AST
    EOpFunctionCall,
    EOpFunction,        // For function definition
    EOpParameters,      // an aggregate listing the parameters to a function

    //
    // Unary operators
    //

    EOpNegative,
    EOpLogicalNot,
    EOpVectorLogicalNot,
    EOpBitwiseNot,

    EOpPostIncrement,
    EOpPostDecrement,
    EOpPreIncrement,
    EOpPreDecrement,

    EOpCopyObject,

    // (u)int* -> bool
    EOpConvInt8ToBool,
    EOpConvUint8ToBool,
    EOpConvInt16ToBool,
    EOpConvUint16ToBool,
    EOpConvIntToBool,
    EOpConvUintToBool,
    EOpConvInt64ToBool,
    EOpConvUint64ToBool,

    // float* -> bool
    EOpConvFloat16ToBool,
    EOpConvFloatToBool,
    EOpConvDoubleToBool,

    // bool -> (u)int*
    EOpConvBoolToInt8,
    EOpConvBoolToUint8,
    EOpConvBoolToInt16,
    EOpConvBoolToUint16,
    EOpConvBoolToInt,
    EOpConvBoolToUint,
    EOpConvBoolToInt64,
    EOpConvBoolToUint64,

    // bool -> float*
    EOpConvBoolToFloat16,
    EOpConvBoolToFloat,
    EOpConvBoolToDouble,

    // int8_t -> (u)int*
    EOpConvInt8ToInt16,
    EOpConvInt8ToInt,
    EOpConvInt8ToInt64,
    EOpConvInt8ToUint8,
    EOpConvInt8ToUint16,
    EOpConvInt8ToUint,
    EOpConvInt8ToUint64,

    // uint8_t -> (u)int*
    EOpConvUint8ToInt8,
    EOpConvUint8ToInt16,
    EOpConvUint8ToInt,
    EOpConvUint8ToInt64,
    EOpConvUint8ToUint16,
    EOpConvUint8ToUint,
    EOpConvUint8ToUint64,

    // int8_t -> float*
    EOpConvInt8ToFloat16,
    EOpConvInt8ToFloat,
    EOpConvInt8ToDouble,

    // uint8_t -> float*
    EOpConvUint8ToFloat16,
    EOpConvUint8ToFloat,
    EOpConvUint8ToDouble,

    // int16_t -> (u)int*
    EOpConvInt16ToInt8,
    EOpConvInt16ToInt,
    EOpConvInt16ToInt64,
    EOpConvInt16ToUint8,
    EOpConvInt16ToUint16,
    EOpConvInt16ToUint,
    EOpConvInt16ToUint64,

    // uint16_t -> (u)int*
    EOpConvUint16ToInt8,
    EOpConvUint16ToInt16,
    EOpConvUint16ToInt,
    EOpConvUint16ToInt64,
    EOpConvUint16ToUint8,
    EOpConvUint16ToUint,
    EOpConvUint16ToUint64,

    // int16_t -> float*
    EOpConvInt16ToFloat16,
    EOpConvInt16ToFloat,
    EOpConvInt16ToDouble,

    // uint16_t -> float*
    EOpConvUint16ToFloat16,
    EOpConvUint16ToFloat,
    EOpConvUint16ToDouble,

    // int32_t -> (u)int*
    EOpConvIntToInt8,
    EOpConvIntToInt16,
    EOpConvIntToInt64,
    EOpConvIntToUint8,
    EOpConvIntToUint16,
    EOpConvIntToUint,
    EOpConvIntToUint64,

    // uint32_t -> (u)int*
    EOpConvUintToInt8,
    EOpConvUintToInt16,
    EOpConvUintToInt,
    EOpConvUintToInt64,
    EOpConvUintToUint8,
    EOpConvUintToUint16,
    EOpConvUintToUint64,

    // int32_t -> float*
    EOpConvIntToFloat16,
    EOpConvIntToFloat,
    EOpConvIntToDouble,

    // uint32_t -> float*
    EOpConvUintToFloat16,
    EOpConvUintToFloat,
    EOpConvUintToDouble,

    // int64_t -> (u)int*
    EOpConvInt64ToInt8,
    EOpConvInt64ToInt16,
    EOpConvInt64ToInt,
    EOpConvInt64ToUint8,
    EOpConvInt64ToUint16,
    EOpConvInt64ToUint,
    EOpConvInt64ToUint64,

    // uint64_t -> (u)int*
    EOpConvUint64ToInt8,
    EOpConvUint64ToInt16,
    EOpConvUint64ToInt,
    EOpConvUint64ToInt64,
    EOpConvUint64ToUint8,
    EOpConvUint64ToUint16,
    EOpConvUint64ToUint,

    // int64_t -> float*
    EOpConvInt64ToFloat16,
    EOpConvInt64ToFloat,
    EOpConvInt64ToDouble,

    // uint64_t -> float*
    EOpConvUint64ToFloat16,
    EOpConvUint64ToFloat,
    EOpConvUint64ToDouble,

    // float16_t -> (u)int*
    EOpConvFloat16ToInt8,
    EOpConvFloat16ToInt16,
    EOpConvFloat16ToInt,
    EOpConvFloat16ToInt64,
    EOpConvFloat16ToUint8,
    EOpConvFloat16ToUint16,
    EOpConvFloat16ToUint,
    EOpConvFloat16ToUint64,

    // float16_t -> float*
    EOpConvFloat16ToFloat,
    EOpConvFloat16ToDouble,

    // float -> (u)int*
    EOpConvFloatToInt8,
    EOpConvFloatToInt16,
    EOpConvFloatToInt,
    EOpConvFloatToInt64,
    EOpConvFloatToUint8,
    EOpConvFloatToUint16,
    EOpConvFloatToUint,
    EOpConvFloatToUint64,

    // float -> float*
    EOpConvFloatToFloat16,
    EOpConvFloatToDouble,

    // float64_t -> (u)int*
    EOpConvDoubleToInt8,
    EOpConvDoubleToInt16,
    EOpConvDoubleToInt,
    EOpConvDoubleToInt64,
    EOpConvDoubleToUint8,
    EOpConvDoubleToUint16,
    EOpConvDoubleToUint,
    EOpConvDoubleToUint64,

    // float64_t -> float*
    EOpConvDoubleToFloat16,
    EOpConvDoubleToFloat,

    // uint64_t <-> pointer
    EOpConvUint64ToPtr,
    EOpConvPtrToUint64,

    // uvec2 <-> pointer
    EOpConvUvec2ToPtr,
    EOpConvPtrToUvec2,

    //
    // binary operations
    //

    EOpAdd,
    EOpSub,
    EOpMul,
    EOpDiv,
    EOpMod,
    EOpRightShift,
    EOpLeftShift,
    EOpAnd,
    EOpInclusiveOr,
    EOpExclusiveOr,
    EOpEqual,
    EOpNotEqual,
    EOpVectorEqual,
    EOpVectorNotEqual,
    EOpLessThan,
    EOpGreaterThan,
    EOpLessThanEqual,
    EOpGreaterThanEqual,
    EOpComma,

    EOpVectorTimesScalar,
    EOpVectorTimesMatrix,
    EOpMatrixTimesVector,
    EOpMatrixTimesScalar,

    EOpLogicalOr,
    EOpLogicalXor,
    EOpLogicalAnd,

    EOpIndexDirect,
    EOpIndexIndirect,
    EOpIndexDirectStruct,

    EOpVectorSwizzle,

    EOpMethod,
    EOpScoping,

    //
    // Built-in functions mapped to operators
    //

    EOpRadians,
    EOpDegrees,
    EOpSin,
    EOpCos,
    EOpTan,
    EOpAsin,
    EOpAcos,
    EOpAtan,
    EOpSinh,
    EOpCosh,
    EOpTanh,
    EOpAsinh,
    EOpAcosh,
    EOpAtanh,

    EOpPow,
    EOpExp,
    EOpLog,
    EOpExp2,
    EOpLog2,
    EOpSqrt,
    EOpInverseSqrt,

    EOpAbs,
    EOpSign,
    EOpFloor,
    EOpTrunc,
    EOpRound,
    EOpRoundEven,
    EOpCeil,
    EOpFract,
    EOpModf,
    EOpMin,
    EOpMax,
    EOpClamp,
    EOpMix,
    EOpStep,
    EOpSmoothStep,

    EOpIsNan,
    EOpIsInf,

    EOpFma,

    EOpFrexp,
    EOpLdexp,

    EOpFloatBitsToInt,
    EOpFloatBitsToUint,
    EOpIntBitsToFloat,
    EOpUintBitsToFloat,
    EOpDoubleBitsToInt64,
    EOpDoubleBitsToUint64,
    EOpInt64BitsToDouble,
    EOpUint64BitsToDouble,
    EOpFloat16BitsToInt16,
    EOpFloat16BitsToUint16,
    EOpInt16BitsToFloat16,
    EOpUint16BitsToFloat16,
    EOpPackSnorm2x16,
    EOpUnpackSnorm2x16,
    EOpPackUnorm2x16,
    EOpUnpackUnorm2x16,
    EOpPackSnorm4x8,
    EOpUnpackSnorm4x8,
    EOpPackUnorm4x8,
    EOpUnpackUnorm4x8,
    EOpPackHalf2x16,
    EOpUnpackHalf2x16,
    EOpPackDouble2x32,
    EOpUnpackDouble2x32,
    EOpPackInt2x32,
    EOpUnpackInt2x32,
    EOpPackUint2x32,
    EOpUnpackUint2x32,
    EOpPackFloat2x16,
    EOpUnpackFloat2x16,
    EOpPackInt2x16,
    EOpUnpackInt2x16,
    EOpPackUint2x16,
    EOpUnpackUint2x16,
    EOpPackInt4x16,
    EOpUnpackInt4x16,
    EOpPackUint4x16,
    EOpUnpackUint4x16,
    EOpPack16,
    EOpPack32,
    EOpPack64,
    EOpUnpack32,
    EOpUnpack16,
    EOpUnpack8,

    EOpLength,
    EOpDistance,
    EOpDot,
    EOpCross,
    EOpNormalize,
    EOpFaceForward,
    EOpReflect,
    EOpRefract,

    EOpMin3,
    EOpMax3,
    EOpMid3,

    EOpDPdx,            // Fragment only
    EOpDPdy,            // Fragment only
    EOpFwidth,          // Fragment only
    EOpDPdxFine,        // Fragment only
    EOpDPdyFine,        // Fragment only
    EOpFwidthFine,      // Fragment only
    EOpDPdxCoarse,      // Fragment only
    EOpDPdyCoarse,      // Fragment only
    EOpFwidthCoarse,    // Fragment only

    EOpInterpolateAtCentroid, // Fragment only
    EOpInterpolateAtSample,   // Fragment only
    EOpInterpolateAtOffset,   // Fragment only
    EOpInterpolateAtVertex,

    EOpMatrixTimesMatrix,
    EOpOuterProduct,
    EOpDeterminant,
    EOpMatrixInverse,
    EOpTranspose,

    EOpFtransform,

    EOpNoise,

    EOpEmitVertex,           // geometry only
    EOpEndPrimitive,         // geometry only
    EOpEmitStreamVertex,     // geometry only
    EOpEndStreamPrimitive,   // geometry only

    EOpBarrier,
    EOpMemoryBarrier,
    EOpMemoryBarrierAtomicCounter,
    EOpMemoryBarrierBuffer,
    EOpMemoryBarrierImage,
    EOpMemoryBarrierShared,  // compute only
    EOpGroupMemoryBarrier,   // compute only

    EOpBallot,
    EOpReadInvocation,
    EOpReadFirstInvocation,

    EOpAnyInvocation,
    EOpAllInvocations,
    EOpAllInvocationsEqual,

    EOpSubgroupGuardStart,
    EOpSubgroupBarrier,
    EOpSubgroupMemoryBarrier,
    EOpSubgroupMemoryBarrierBuffer,
    EOpSubgroupMemoryBarrierImage,
    EOpSubgroupMemoryBarrierShared, // compute only
    EOpSubgroupElect,
    EOpSubgroupAll,
    EOpSubgroupAny,
    EOpSubgroupAllEqual,
    EOpSubgroupBroadcast,
    EOpSubgroupBroadcastFirst,
    EOpSubgroupBallot,
    EOpSubgroupInverseBallot,
    EOpSubgroupBallotBitExtract,
    EOpSubgroupBallotBitCount,
    EOpSubgroupBallotInclusiveBitCount,
    EOpSubgroupBallotExclusiveBitCount,
    EOpSubgroupBallotFindLSB,
    EOpSubgroupBallotFindMSB,
    EOpSubgroupShuffle,
    EOpSubgroupShuffleXor,
    EOpSubgroupShuffleUp,
    EOpSubgroupShuffleDown,
    EOpSubgroupAdd,
    EOpSubgroupMul,
    EOpSubgroupMin,
    EOpSubgroupMax,
    EOpSubgroupAnd,
    EOpSubgroupOr,
    EOpSubgroupXor,
    EOpSubgroupInclusiveAdd,
    EOpSubgroupInclusiveMul,
    EOpSubgroupInclusiveMin,
    EOpSubgroupInclusiveMax,
    EOpSubgroupInclusiveAnd,
    EOpSubgroupInclusiveOr,
    EOpSubgroupInclusiveXor,
    EOpSubgroupExclusiveAdd,
    EOpSubgroupExclusiveMul,
    EOpSubgroupExclusiveMin,
    EOpSubgroupExclusiveMax,
    EOpSubgroupExclusiveAnd,
    EOpSubgroupExclusiveOr,
    EOpSubgroupExclusiveXor,
    EOpSubgroupClusteredAdd,
    EOpSubgroupClusteredMul,
    EOpSubgroupClusteredMin,
    EOpSubgroupClusteredMax,
    EOpSubgroupClusteredAnd,
    EOpSubgroupClusteredOr,
    EOpSubgroupClusteredXor,
    EOpSubgroupQuadBroadcast,
    EOpSubgroupQuadSwapHorizontal,
    EOpSubgroupQuadSwapVertical,
    EOpSubgroupQuadSwapDiagonal,

    EOpSubgroupPartition,
    EOpSubgroupPartitionedAdd,
    EOpSubgroupPartitionedMul,
    EOpSubgroupPartitionedMin,
    EOpSubgroupPartitionedMax,
    EOpSubgroupPartitionedAnd,
    EOpSubgroupPartitionedOr,
    EOpSubgroupPartitionedXor,
    EOpSubgroupPartitionedInclusiveAdd,
    EOpSubgroupPartitionedInclusiveMul,
    EOpSubgroupPartitionedInclusiveMin,
    EOpSubgroupPartitionedInclusiveMax,
    EOpSubgroupPartitionedInclusiveAnd,
    EOpSubgroupPartitionedInclusiveOr,
    EOpSubgroupPartitionedInclusiveXor,
    EOpSubgroupPartitionedExclusiveAdd,
    EOpSubgroupPartitionedExclusiveMul,
    EOpSubgroupPartitionedExclusiveMin,
    EOpSubgroupPartitionedExclusiveMax,
    EOpSubgroupPartitionedExclusiveAnd,
    EOpSubgroupPartitionedExclusiveOr,
    EOpSubgroupPartitionedExclusiveXor,

    EOpSubgroupGuardStop,

    EOpMinInvocations,
    EOpMaxInvocations,
    EOpAddInvocations,
    EOpMinInvocationsNonUniform,
    EOpMaxInvocationsNonUniform,
    EOpAddInvocationsNonUniform,
    EOpMinInvocationsInclusiveScan,
    EOpMaxInvocationsInclusiveScan,
    EOpAddInvocationsInclusiveScan,
    EOpMinInvocationsInclusiveScanNonUniform,
    EOpMaxInvocationsInclusiveScanNonUniform,
    EOpAddInvocationsInclusiveScanNonUniform,
    EOpMinInvocationsExclusiveScan,
    EOpMaxInvocationsExclusiveScan,
    EOpAddInvocationsExclusiveScan,
    EOpMinInvocationsExclusiveScanNonUniform,
    EOpMaxInvocationsExclusiveScanNonUniform,
    EOpAddInvocationsExclusiveScanNonUniform,
    EOpSwizzleInvocations,
    EOpSwizzleInvocationsMasked,
    EOpWriteInvocation,
    EOpMbcnt,

    EOpCubeFaceIndex,
    EOpCubeFaceCoord,
    EOpTime,

    EOpAtomicAdd,
    EOpAtomicMin,
    EOpAtomicMax,
    EOpAtomicAnd,
    EOpAtomicOr,
    EOpAtomicXor,
    EOpAtomicExchange,
    EOpAtomicCompSwap,
    EOpAtomicLoad,
    EOpAtomicStore,

    EOpAtomicCounterIncrement, // results in pre-increment value
    EOpAtomicCounterDecrement, // results in post-decrement value
    EOpAtomicCounter,
    EOpAtomicCounterAdd,
    EOpAtomicCounterSubtract,
    EOpAtomicCounterMin,
    EOpAtomicCounterMax,
    EOpAtomicCounterAnd,
    EOpAtomicCounterOr,
    EOpAtomicCounterXor,
    EOpAtomicCounterExchange,
    EOpAtomicCounterCompSwap,

    EOpAny,
    EOpAll,

    EOpCooperativeMatrixLoad,
    EOpCooperativeMatrixStore,
    EOpCooperativeMatrixMulAdd,

    EOpBeginInvocationInterlock, // Fragment only
    EOpEndInvocationInterlock,   // Fragment only

    EOpIsHelperInvocation,

    EOpDebugPrintf,

    //
    // Branch
    //

    EOpKill,     // Fragment only
    EOpReturn,
    EOpBreak,
    EOpContinue,
    EOpCase,
    EOpDefault,
    EOpDemote,   // Fragment only

    //
    // Constructors
    //

    EOpConstructGuardStart,
    EOpConstructInt,   // these first scalar forms also identify what implicit conversion is needed
    EOpConstructUint,
    EOpConstructInt8,
    EOpConstructUint8,
    EOpConstructInt16,
    EOpConstructUint16,
    EOpConstructInt64,
    EOpConstructUint64,
    EOpConstructBool,
    EOpConstructFloat,
    EOpConstructDouble,
    // Keep vector and matrix constructors in a consistent relative order for
    // TParseContext::constructBuiltIn, which converts between 8/16/32 bit
    // vector constructors
    EOpConstructVec2,
    EOpConstructVec3,
    EOpConstructVec4,
    EOpConstructMat2x2,
    EOpConstructMat2x3,
    EOpConstructMat2x4,
    EOpConstructMat3x2,
    EOpConstructMat3x3,
    EOpConstructMat3x4,
    EOpConstructMat4x2,
    EOpConstructMat4x3,
    EOpConstructMat4x4,
    EOpConstructDVec2,
    EOpConstructDVec3,
    EOpConstructDVec4,
    EOpConstructBVec2,
    EOpConstructBVec3,
    EOpConstructBVec4,
    EOpConstructI8Vec2,
    EOpConstructI8Vec3,
    EOpConstructI8Vec4,
    EOpConstructU8Vec2,
    EOpConstructU8Vec3,
    EOpConstructU8Vec4,
    EOpConstructI16Vec2,
    EOpConstructI16Vec3,
    EOpConstructI16Vec4,
    EOpConstructU16Vec2,
    EOpConstructU16Vec3,
    EOpConstructU16Vec4,
    EOpConstructIVec2,
    EOpConstructIVec3,
    EOpConstructIVec4,
    EOpConstructUVec2,
    EOpConstructUVec3,
    EOpConstructUVec4,
    EOpConstructI64Vec2,
    EOpConstructI64Vec3,
    EOpConstructI64Vec4,
    EOpConstructU64Vec2,
    EOpConstructU64Vec3,
    EOpConstructU64Vec4,
    EOpConstructDMat2x2,
    EOpConstructDMat2x3,
    EOpConstructDMat2x4,
    EOpConstructDMat3x2,
    EOpConstructDMat3x3,
    EOpConstructDMat3x4,
    EOpConstructDMat4x2,
    EOpConstructDMat4x3,
    EOpConstructDMat4x4,
    EOpConstructIMat2x2,
    EOpConstructIMat2x3,
    EOpConstructIMat2x4,
    EOpConstructIMat3x2,
    EOpConstructIMat3x3,
    EOpConstructIMat3x4,
    EOpConstructIMat4x2,
    EOpConstructIMat4x3,
    EOpConstructIMat4x4,
    EOpConstructUMat2x2,
    EOpConstructUMat2x3,
    EOpConstructUMat2x4,
    EOpConstructUMat3x2,
    EOpConstructUMat3x3,
    EOpConstructUMat3x4,
    EOpConstructUMat4x2,
    EOpConstructUMat4x3,
    EOpConstructUMat4x4,
    EOpConstructBMat2x2,
    EOpConstructBMat2x3,
    EOpConstructBMat2x4,
    EOpConstructBMat3x2,
    EOpConstructBMat3x3,
    EOpConstructBMat3x4,
    EOpConstructBMat4x2,
    EOpConstructBMat4x3,
    EOpConstructBMat4x4,
    EOpConstructFloat16,
    EOpConstructF16Vec2,
    EOpConstructF16Vec3,
    EOpConstructF16Vec4,
    EOpConstructF16Mat2x2,
    EOpConstructF16Mat2x3,
    EOpConstructF16Mat2x4,
    EOpConstructF16Mat3x2,
    EOpConstructF16Mat3x3,
    EOpConstructF16Mat3x4,
    EOpConstructF16Mat4x2,
    EOpConstructF16Mat4x3,
    EOpConstructF16Mat4x4,
    EOpConstructStruct,
    EOpConstructTextureSampler,
    EOpConstructNonuniform,     // expected to be transformed away, not present in final AST
    EOpConstructReference,
    EOpConstructCooperativeMatrix,
    EOpConstructGuardEnd,

    //
    // moves
    //

    EOpAssign,
    EOpAddAssign,
    EOpSubAssign,
    EOpMulAssign,
    EOpVectorTimesMatrixAssign,
    EOpVectorTimesScalarAssign,
    EOpMatrixTimesScalarAssign,
    EOpMatrixTimesMatrixAssign,
    EOpDivAssign,
    EOpModAssign,
    EOpAndAssign,
    EOpInclusiveOrAssign,
    EOpExclusiveOrAssign,
    EOpLeftShiftAssign,
    EOpRightShiftAssign,

    //
    // Array operators
    //

    // Can apply to arrays, vectors, or matrices.
    // Can be decomposed to a constant at compile time, but this does not always happen,
    // due to link-time effects. So, consumer can expect either a link-time sized or
    // run-time sized array.
    EOpArrayLength,

    //
    // Image operations
    //

    EOpImageGuardBegin,

    EOpImageQuerySize,
    EOpImageQuerySamples,
    EOpImageLoad,
    EOpImageStore,
    EOpImageLoadLod,
    EOpImageStoreLod,
    EOpImageAtomicAdd,
    EOpImageAtomicMin,
    EOpImageAtomicMax,
    EOpImageAtomicAnd,
    EOpImageAtomicOr,
    EOpImageAtomicXor,
    EOpImageAtomicExchange,
    EOpImageAtomicCompSwap,
    EOpImageAtomicLoad,
    EOpImageAtomicStore,

    EOpSubpassLoad,
    EOpSubpassLoadMS,
    EOpSparseImageLoad,
    EOpSparseImageLoadLod,

    EOpImageGuardEnd,

    //
    // Texture operations
    //

    EOpTextureGuardBegin,

    EOpTextureQuerySize,
    EOpTextureQueryLod,
    EOpTextureQueryLevels,
    EOpTextureQuerySamples,

    EOpSamplingGuardBegin,

    EOpTexture,
    EOpTextureProj,
    EOpTextureLod,
    EOpTextureOffset,
    EOpTextureFetch,
    EOpTextureFetchOffset,
    EOpTextureProjOffset,
    EOpTextureLodOffset,
    EOpTextureProjLod,
    EOpTextureProjLodOffset,
    EOpTextureGrad,
    EOpTextureGradOffset,
    EOpTextureProjGrad,
    EOpTextureProjGradOffset,
    EOpTextureGather,
    EOpTextureGatherOffset,
    EOpTextureGatherOffsets,
    EOpTextureClamp,
    EOpTextureOffsetClamp,
    EOpTextureGradClamp,
    EOpTextureGradOffsetClamp,
    EOpTextureGatherLod,
    EOpTextureGatherLodOffset,
    EOpTextureGatherLodOffsets,
    EOpFragmentMaskFetch,
    EOpFragmentFetch,

    EOpSparseTextureGuardBegin,

    EOpSparseTexture,
    EOpSparseTextureLod,
    EOpSparseTextureOffset,
    EOpSparseTextureFetch,
    EOpSparseTextureFetchOffset,
    EOpSparseTextureLodOffset,
    EOpSparseTextureGrad,
    EOpSparseTextureGradOffset,
    EOpSparseTextureGather,
    EOpSparseTextureGatherOffset,
    EOpSparseTextureGatherOffsets,
    EOpSparseTexelsResident,
    EOpSparseTextureClamp,
    EOpSparseTextureOffsetClamp,
    EOpSparseTextureGradClamp,
    EOpSparseTextureGradOffsetClamp,
    EOpSparseTextureGatherLod,
    EOpSparseTextureGatherLodOffset,
    EOpSparseTextureGatherLodOffsets,

    EOpSparseTextureGuardEnd,

    EOpImageFootprintGuardBegin,
    EOpImageSampleFootprintNV,
    EOpImageSampleFootprintClampNV,
    EOpImageSampleFootprintLodNV,
    EOpImageSampleFootprintGradNV,
    EOpImageSampleFootprintGradClampNV,
    EOpImageFootprintGuardEnd,
    EOpSamplingGuardEnd,
    EOpTextureGuardEnd,

    //
    // Integer operations
    //

    EOpAddCarry,
    EOpSubBorrow,
    EOpUMulExtended,
    EOpIMulExtended,
    EOpBitfieldExtract,
    EOpBitfieldInsert,
    EOpBitFieldReverse,
    EOpBitCount,
    EOpFindLSB,
    EOpFindMSB,

    EOpCountLeadingZeros,
    EOpCountTrailingZeros,
    EOpAbsDifference,
    EOpAddSaturate,
    EOpSubSaturate,
    EOpAverage,
    EOpAverageRounded,
    EOpMul32x16,

    EOpTrace,
    EOpReportIntersection,
    EOpIgnoreIntersection,
    EOpTerminateRay,
    EOpExecuteCallable,
    EOpWritePackedPrimitiveIndices4x8NV,

    //
    // GL_EXT_ray_query operations
    //

    EOpRayQueryInitialize,
    EOpRayQueryTerminate,
    EOpRayQueryGenerateIntersection,
    EOpRayQueryConfirmIntersection,
    EOpRayQueryProceed,
    EOpRayQueryGetIntersectionType,
    EOpRayQueryGetRayTMin,
    EOpRayQueryGetRayFlags,
    EOpRayQueryGetIntersectionT,
    EOpRayQueryGetIntersectionInstanceCustomIndex,
    EOpRayQueryGetIntersectionInstanceId,
    EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset,
    EOpRayQueryGetIntersectionGeometryIndex,
    EOpRayQueryGetIntersectionPrimitiveIndex,
    EOpRayQueryGetIntersectionBarycentrics,
    EOpRayQueryGetIntersectionFrontFace,
    EOpRayQueryGetIntersectionCandidateAABBOpaque,
    EOpRayQueryGetIntersectionObjectRayDirection,
    EOpRayQueryGetIntersectionObjectRayOrigin,
    EOpRayQueryGetWorldRayDirection,
    EOpRayQueryGetWorldRayOrigin,
    EOpRayQueryGetIntersectionObjectToWorld,
    EOpRayQueryGetIntersectionWorldToObject,

    //
    // HLSL operations
    //

    EOpClip,                // discard if input value < 0
    EOpIsFinite,
    EOpLog10,               // base 10 log
    EOpRcp,                 // 1/x
    EOpSaturate,            // clamp from 0 to 1
    EOpSinCos,              // sin and cos in out parameters
    EOpGenMul,              // mul(x,y) on any of mat/vec/scalars
    EOpDst,                 // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
    EOpInterlockedAdd,      // atomic ops, but uses [optional] out arg instead of return
    EOpInterlockedAnd,      // ...
    EOpInterlockedCompareExchange, // ...
    EOpInterlockedCompareStore,    // ...
    EOpInterlockedExchange, // ...
    EOpInterlockedMax,      // ...
    EOpInterlockedMin,      // ...
    EOpInterlockedOr,       // ...
    EOpInterlockedXor,      // ...
    EOpAllMemoryBarrierWithGroupSync,       // memory barriers without non-hlsl AST equivalents
    EOpDeviceMemoryBarrier,                 // ...
    EOpDeviceMemoryBarrierWithGroupSync,    // ...
    EOpWorkgroupMemoryBarrier,              // ...
    EOpWorkgroupMemoryBarrierWithGroupSync, // ...
    EOpEvaluateAttributeSnapped,            // InterpolateAtOffset with int position on 16x16 grid
    EOpF32tof16,                            // HLSL conversion: half of a PackHalf2x16
    EOpF16tof32,                            // HLSL conversion: half of an UnpackHalf2x16
    EOpLit,                                 // HLSL lighting coefficient vector
    EOpTextureBias,                         // HLSL texture bias: will be lowered to EOpTexture
    EOpAsDouble,                            // slightly different from EOpUint64BitsToDouble
    EOpD3DCOLORtoUBYTE4,                    // convert and swizzle 4-component color to UBYTE4 range

    EOpMethodSample,                // Texture object methods.  These are translated to existing
    EOpMethodSampleBias,            // AST methods, and exist to represent HLSL semantics until that
    EOpMethodSampleCmp,             // translation is performed.  See HlslParseContext::decomposeSampleMethods().
    EOpMethodSampleCmpLevelZero,    // ...
    EOpMethodSampleGrad,            // ...
    EOpMethodSampleLevel,           // ...
    EOpMethodLoad,                  // ...
    EOpMethodGetDimensions,         // ...
    EOpMethodGetSamplePosition,     // ...
    EOpMethodGather,                // ...
    EOpMethodCalculateLevelOfDetail,          // ...
    EOpMethodCalculateLevelOfDetailUnclamped, // ...

    // Load already defined above for textures
    EOpMethodLoad2,                 // Structure buffer object methods.  These are translated to existing
    EOpMethodLoad3,                 // AST methods, and exist to represent HLSL semantics until that
    EOpMethodLoad4,                 // translation is performed.  See HlslParseContext::decomposeSampleMethods().
    EOpMethodStore,                 // ...
    EOpMethodStore2,                // ...
    EOpMethodStore3,                // ...
    EOpMethodStore4,                // ...
    EOpMethodIncrementCounter,      // ...
    EOpMethodDecrementCounter,      // ...
    // EOpMethodAppend is defined for geo shaders below
    EOpMethodConsume,

    // SM5 texture methods
    EOpMethodGatherRed,             // These are covered under the above EOpMethodSample comment about
    EOpMethodGatherGreen,           // translation to existing AST opcodes.  They exist temporarily
    EOpMethodGatherBlue,            // because HLSL arguments are slightly different.
    EOpMethodGatherAlpha,           // ...
    EOpMethodGatherCmp,             // ...
    EOpMethodGatherCmpRed,          // ...
    EOpMethodGatherCmpGreen,        // ...
    EOpMethodGatherCmpBlue,         // ...
    EOpMethodGatherCmpAlpha,        // ...

    // geometry methods
    EOpMethodAppend,                // Geometry shader methods
    EOpMethodRestartStrip,          // ...

    // matrix
    EOpMatrixSwizzle,               // select multiple matrix components (non-column)

    // SM6 wave ops
    EOpWaveGetLaneCount,            // Will decompose to gl_SubgroupSize.
    EOpWaveGetLaneIndex,            // Will decompose to gl_SubgroupInvocationID.
    EOpWaveActiveCountBits,         // Will decompose to subgroupBallotBitCount(subgroupBallot()).
    EOpWavePrefixCountBits,         // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).

    // Shader Clock Ops
    EOpReadClockSubgroupKHR,
    EOpReadClockDeviceKHR,
};
1037
1038 class TIntermTraverser;
1039 class TIntermOperator;
1040 class TIntermAggregate;
1041 class TIntermUnary;
1042 class TIntermBinary;
1043 class TIntermConstantUnion;
1044 class TIntermSelection;
1045 class TIntermSwitch;
1046 class TIntermBranch;
1047 class TIntermTyped;
1048 class TIntermMethod;
1049 class TIntermSymbol;
1050 class TIntermLoop;
1051
1052 } // end namespace glslang
1053
1054 //
1055 // Base class for the tree nodes
1056 //
1057 // (Put outside the glslang namespace, as it's used as part of the external interface.)
1058 //
1059 class TIntermNode {
1060 public:
POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator ())1061 POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
1062
1063 TIntermNode() { loc.init(); }
getLoc()1064 virtual const glslang::TSourceLoc& getLoc() const { return loc; }
setLoc(const glslang::TSourceLoc & l)1065 virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
1066 virtual void traverse(glslang::TIntermTraverser*) = 0;
getAsTyped()1067 virtual glslang::TIntermTyped* getAsTyped() { return 0; }
getAsOperator()1068 virtual glslang::TIntermOperator* getAsOperator() { return 0; }
getAsConstantUnion()1069 virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; }
getAsAggregate()1070 virtual glslang::TIntermAggregate* getAsAggregate() { return 0; }
getAsUnaryNode()1071 virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; }
getAsBinaryNode()1072 virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; }
getAsSelectionNode()1073 virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; }
getAsSwitchNode()1074 virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; }
getAsMethodNode()1075 virtual glslang::TIntermMethod* getAsMethodNode() { return 0; }
getAsSymbolNode()1076 virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; }
getAsBranchNode()1077 virtual glslang::TIntermBranch* getAsBranchNode() { return 0; }
getAsLoopNode()1078 virtual glslang::TIntermLoop* getAsLoopNode() { return 0; }
1079
getAsTyped()1080 virtual const glslang::TIntermTyped* getAsTyped() const { return 0; }
getAsOperator()1081 virtual const glslang::TIntermOperator* getAsOperator() const { return 0; }
getAsConstantUnion()1082 virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
getAsAggregate()1083 virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; }
getAsUnaryNode()1084 virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; }
getAsBinaryNode()1085 virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; }
getAsSelectionNode()1086 virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; }
getAsSwitchNode()1087 virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; }
getAsMethodNode()1088 virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; }
getAsSymbolNode()1089 virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; }
getAsBranchNode()1090 virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; }
getAsLoopNode()1091 virtual const glslang::TIntermLoop* getAsLoopNode() const { return 0; }
~TIntermNode()1092 virtual ~TIntermNode() { }
1093
1094 protected:
1095 TIntermNode(const TIntermNode&);
1096 TIntermNode& operator=(const TIntermNode&);
1097 glslang::TSourceLoc loc;
1098 };
1099
1100 namespace glslang {
1101
1102 //
1103 // This is just to help yacc.
1104 //
//
// This is just to help yacc.
//
// A plain pair of node pointers so the bison/yacc semantic-value union can
// carry two tree nodes at once (e.g. the two halves of a declaration).
struct TIntermNodePair {
    TIntermNode* node1;
    TIntermNode* node2;
};
1109
1110 //
1111 // Intermediate class for nodes that have a type.
1112 //
//
// Intermediate class for nodes that have a type.
//
// Holds a TType by value (populated via shallowCopy, so deep structure such
// as struct members is shared, not duplicated) and forwards the common type
// queries to it.
//
class TIntermTyped : public TIntermNode {
public:
    // Construct from an existing type; shallowCopy shares any deep structure.
    TIntermTyped(const TType& t) { type.shallowCopy(t); }
    // Construct a default (scalar, temporary) type of the given basic type.
    TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }

    // Downcast support (see TIntermNode): typed nodes return themselves.
    virtual TIntermTyped* getAsTyped() { return this; }
    virtual const TIntermTyped* getAsTyped() const { return this; }

    virtual void setType(const TType& t) { type.shallowCopy(t); }
    virtual const TType& getType() const { return type; }
    virtual TType& getWritableType() { return type; }

    // Convenience forwarders to the underlying TType.
    virtual TBasicType getBasicType() const { return type.getBasicType(); }
    virtual TQualifier& getQualifier() { return type.getQualifier(); }
    virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
    // Pushes a precision qualifier down through the subtree (defined elsewhere).
    virtual void propagatePrecision(TPrecisionQualifier);
    virtual int getVectorSize() const { return type.getVectorSize(); }
    virtual int getMatrixCols() const { return type.getMatrixCols(); }
    virtual int getMatrixRows() const { return type.getMatrixRows(); }
    virtual bool isMatrix() const { return type.isMatrix(); }
    virtual bool isArray() const { return type.isArray(); }
    virtual bool isVector() const { return type.isVector(); }
    virtual bool isScalar() const { return type.isScalar(); }
    virtual bool isStruct() const { return type.isStruct(); }
    virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
    virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
    bool isAtomic() const { return type.isAtomic(); }
    bool isReference() const { return type.isReference(); }
    // Human-readable description of the full type, for diagnostics.
    TString getCompleteString() const { return type.getCompleteString(); }

protected:
    // Assignment is intentionally unimplemented; nodes are shared by pointer.
    TIntermTyped& operator=(const TIntermTyped&);
    TType type;  // the node's type, shallow-copied from its source
};
1145
1146 //
1147 // Handle for, do-while, and while loops.
1148 //
class TIntermLoop : public TIntermNode {
public:
    // aTest may be null (e.g. a 'for' with no condition); aTerminal exists only
    // for for-loops. testFirst is true for while/for, false for do-while.
    TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
        body(aBody),
        test(aTest),
        terminal(aTerminal),
        first(testFirst),
        unroll(false),
        dontUnroll(false),
        dependency(0),
        minIterations(0),
        maxIterations(iterationsInfinite),
        iterationMultiple(1),
        peelCount(0),
        partialCount(0)
    { }

    virtual TIntermLoop* getAsLoopNode() { return this; }
    virtual const TIntermLoop* getAsLoopNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    TIntermNode* getBody() const { return body; }
    TIntermTyped* getTest() const { return test; }
    TIntermTyped* getTerminal() const { return terminal; }
    bool testFirst() const { return first; }

    void setUnroll() { unroll = true; }
    // Requesting "don't unroll" also clears any peel/partial counts, since
    // those only make sense when unrolling is permitted (mirrors setPeelCount/
    // setPartialCount clearing dontUnroll).
    void setDontUnroll() {
        dontUnroll = true;
        peelCount = 0;
        partialCount = 0;
    }
    bool getUnroll() const { return unroll; }
    bool getDontUnroll() const { return dontUnroll; }

    static const unsigned int dependencyInfinite = 0xFFFFFFFF;
    static const unsigned int iterationsInfinite = 0xFFFFFFFF;
    // NOTE(review): takes/returns int while 'dependency' is unsigned; passing
    // dependencyInfinite relies on the implicit conversion — confirm intended.
    void setLoopDependency(int d) { dependency = d; }
    int getLoopDependency() const { return dependency; }

    void setMinIterations(unsigned int v) { minIterations = v; }
    unsigned int getMinIterations() const { return minIterations; }
    void setMaxIterations(unsigned int v) { maxIterations = v; }
    unsigned int getMaxIterations() const { return maxIterations; }
    void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
    unsigned int getIterationMultiple() const { return iterationMultiple; }
    // Setting a peel count implies unrolling is allowed, so dontUnroll is cleared.
    void setPeelCount(unsigned int v) {
        peelCount = v;
        dontUnroll = false;
    }
    unsigned int getPeelCount() const { return peelCount; }
    // Setting a partial count implies unrolling is allowed, so dontUnroll is cleared.
    void setPartialCount(unsigned int v) {
        partialCount = v;
        dontUnroll = false;
    }
    unsigned int getPartialCount() const { return partialCount; }

protected:
    TIntermNode* body;       // code to loop over
    TIntermTyped* test;      // exit condition associated with loop, could be 0 for 'for' loops
    TIntermTyped* terminal;  // exists for for-loops
    bool first;              // true for while and for, not for do-while
    bool unroll;             // true if unroll requested
    bool dontUnroll;         // true if request to not unroll
    unsigned int dependency; // loop dependency hint; 0 means not set or unknown
    unsigned int minIterations;      // as per the SPIR-V specification
    unsigned int maxIterations;      // as per the SPIR-V specification
    unsigned int iterationMultiple;  // as per the SPIR-V specification
    unsigned int peelCount;          // as per the SPIR-V specification
    unsigned int partialCount;       // as per the SPIR-V specification
};
1219
1220 //
1221 // Handle case, break, continue, return, and kill.
1222 //
1223 class TIntermBranch : public TIntermNode {
1224 public:
TIntermBranch(TOperator op,TIntermTyped * e)1225 TIntermBranch(TOperator op, TIntermTyped* e) :
1226 flowOp(op),
1227 expression(e) { }
getAsBranchNode()1228 virtual TIntermBranch* getAsBranchNode() { return this; }
getAsBranchNode()1229 virtual const TIntermBranch* getAsBranchNode() const { return this; }
1230 virtual void traverse(TIntermTraverser*);
getFlowOp()1231 TOperator getFlowOp() const { return flowOp; }
getExpression()1232 TIntermTyped* getExpression() const { return expression; }
setExpression(TIntermTyped * pExpression)1233 void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
1234 void updatePrecision(TPrecisionQualifier parentPrecision);
1235 protected:
1236 TOperator flowOp;
1237 TIntermTyped* expression;
1238 };
1239
1240 //
1241 // Represent method names before seeing their calling signature
1242 // or resolving them to operations. Just an expression as the base object
1243 // and a textural name.
1244 //
1245 class TIntermMethod : public TIntermTyped {
1246 public:
TIntermMethod(TIntermTyped * o,const TType & t,const TString & m)1247 TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
getAsMethodNode()1248 virtual TIntermMethod* getAsMethodNode() { return this; }
getAsMethodNode()1249 virtual const TIntermMethod* getAsMethodNode() const { return this; }
getMethodName()1250 virtual const TString& getMethodName() const { return method; }
getObject()1251 virtual TIntermTyped* getObject() const { return object; }
1252 virtual void traverse(TIntermTraverser*);
1253 protected:
1254 TIntermTyped* object;
1255 TString method;
1256 };
1257
1258 //
1259 // Nodes that correspond to symbols or constants in the source code.
1260 //
class TIntermSymbol : public TIntermTyped {
public:
    // if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes from
    // per process threadPoolAllocator, then it causes increased memory usage per compile
    // it is essential to use "symbol = sym" to assign to symbol
    TIntermSymbol(int i, const TString& n, const TType& t)
        : TIntermTyped(t), id(i),
#ifndef GLSLANG_WEB
        flattenSubset(-1),
#endif
        constSubtree(nullptr)
          { name = n; }  // assignment (not init-list) on purpose; see comment above
    virtual int getId() const { return id; }
    virtual void changeId(int i) { id = i; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual TIntermSymbol* getAsSymbolNode() { return this; }
    virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
    // Front-end compile-time constant value, when the symbol has one.
    void setConstArray(const TConstUnionArray& c) { constArray = c; }
    const TConstUnionArray& getConstArray() const { return constArray; }
    // Subtree to evaluate for specialization-constant symbols.
    void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
    TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifndef GLSLANG_WEB
    void setFlattenSubset(int subset) { flattenSubset = subset; }
    int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif

    // This is meant for cases where a node has already been constructed, and
    // later on, it becomes necessary to switch to a different symbol.
    virtual void switchId(int newId) { id = newId; }

protected:
    int id;                      // the unique id of the symbol this node represents
#ifndef GLSLANG_WEB
    int flattenSubset;           // how deeply the flattened object rooted at id has been dereferenced
#endif
    TString name;                // the name of the symbol this node represents
    TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
    TIntermTyped* constSubtree;  // expression tree for a specialization-constant value, else null
};
1301
1302 class TIntermConstantUnion : public TIntermTyped {
1303 public:
TIntermConstantUnion(const TConstUnionArray & ua,const TType & t)1304 TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
getConstArray()1305 const TConstUnionArray& getConstArray() const { return constArray; }
getAsConstantUnion()1306 virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
getAsConstantUnion()1307 virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
1308 virtual void traverse(TIntermTraverser*);
1309 virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
1310 virtual TIntermTyped* fold(TOperator, const TType&) const;
setLiteral()1311 void setLiteral() { literal = true; }
setExpression()1312 void setExpression() { literal = false; }
isLiteral()1313 bool isLiteral() const { return literal; }
1314
1315 protected:
1316 TIntermConstantUnion& operator=(const TIntermConstantUnion&);
1317
1318 const TConstUnionArray constArray;
1319 bool literal; // true if node represents a literal in the source code
1320 };
1321
1322 // Represent the independent aspects of a texturing TOperator
// Each flag is one independent dimension of a texturing TOperator, filled in
// by TIntermOperator::crackTexture(). All flags default to false so an
// instance is well-defined even before crackTexture() runs (the members were
// previously left indeterminate).
struct TCrackedTextureOp {
    bool query = false;     // size/lod/levels/samples/residency query
    bool proj = false;      // projective form
    bool lod = false;       // explicit level of detail
    bool fetch = false;     // texel fetch
    bool offset = false;    // single texel offset
    bool offsets = false;   // per-component array of offsets (gather)
    bool gather = false;    // four-texel gather
    bool grad = false;      // explicit gradients
    bool subpass = false;   // subpass-input access
    bool lodClamp = false;  // LOD clamp (sparse/clamp variants)
    bool fragMask = false;  // fragment-mask fetch (AMD)
};
1336
1337 //
1338 // Intermediate class for node types that hold operators.
1339 //
class TIntermOperator : public TIntermTyped {
public:
    virtual TIntermOperator* getAsOperator() { return this; }
    virtual const TIntermOperator* getAsOperator() const { return this; }
    TOperator getOp() const { return op; }
    void setOp(TOperator newOp) { op = newOp; }
    bool modifiesState() const;
    bool isConstructor() const;
    // Operator-family queries: each checks whether 'op' lies inside the
    // corresponding guard range of the TOperator enumeration.
    bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
    bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
#ifdef GLSLANG_WEB
    // Web builds exclude these operator families, so the queries are constant-false.
    bool isImage() const { return false; }
    bool isSparseTexture() const { return false; }
    bool isImageFootprint() const { return false; }
    bool isSparseImage() const { return false; }
    bool isSubgroup() const { return false; }
#else
    bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
    bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
    bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
    bool isSparseImage() const { return op == EOpSparseImageLoad; }
    bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; }
#endif

    void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
    // Effective operation precision: the explicit override if set, otherwise
    // the result type's precision (see comment on 'operationPrecision' below).
    TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
                                                               operationPrecision :
                                                               type.getQualifier().precision; }
    // Like TIntermTyped::getCompleteString(), but also notes when the
    // operation precision differs from the result precision.
    TString getCompleteString() const
    {
        TString cs = type.getCompleteString();
        if (getOperationPrecision() != type.getQualifier().precision) {
            cs += ", operation at ";
            cs += GetPrecisionQualifierString(getOperationPrecision());
        }

        return cs;
    }

    // Crack the op into the individual dimensions of texturing operation.
    void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
    {
        // Start from all-false, then set only the flags this op implies.
        cracked.query = false;
        cracked.proj = false;
        cracked.lod = false;
        cracked.fetch = false;
        cracked.offset = false;
        cracked.offsets = false;
        cracked.gather = false;
        cracked.grad = false;
        cracked.subpass = false;
        cracked.lodClamp = false;
        cracked.fragMask = false;

        switch (op) {
        // queries
        case EOpImageQuerySize:
        case EOpImageQuerySamples:
        case EOpTextureQuerySize:
        case EOpTextureQueryLod:
        case EOpTextureQueryLevels:
        case EOpTextureQuerySamples:
        case EOpSparseTexelsResident:
            cracked.query = true;
            break;
        // plain sampling: no extra dimensions
        case EOpTexture:
        case EOpSparseTexture:
            break;
        case EOpTextureProj:
            cracked.proj = true;
            break;
        case EOpTextureLod:
        case EOpSparseTextureLod:
            cracked.lod = true;
            break;
        case EOpTextureOffset:
        case EOpSparseTextureOffset:
            cracked.offset = true;
            break;
        case EOpTextureFetch:
        case EOpSparseTextureFetch:
            cracked.fetch = true;
            // these sampler shapes take an explicit lod argument on fetch
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureFetchOffset:
        case EOpSparseTextureFetchOffset:
            cracked.fetch = true;
            cracked.offset = true;
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureProjOffset:
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureLodOffset:
        case EOpSparseTextureLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureProjLod:
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureProjLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureGrad:
        case EOpSparseTextureGrad:
            cracked.grad = true;
            break;
        case EOpTextureGradOffset:
        case EOpSparseTextureGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            break;
        case EOpTextureProjGrad:
            cracked.grad = true;
            cracked.proj = true;
            break;
        case EOpTextureProjGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            cracked.proj = true;
            break;
#ifndef GLSLANG_WEB
        // LOD-clamped variants
        case EOpTextureClamp:
        case EOpSparseTextureClamp:
            cracked.lodClamp = true;
            break;
        case EOpTextureOffsetClamp:
        case EOpSparseTextureOffsetClamp:
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradClamp:
        case EOpSparseTextureGradClamp:
            cracked.grad = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradOffsetClamp:
        case EOpSparseTextureGradOffsetClamp:
            cracked.grad = true;
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        // gather variants
        case EOpTextureGather:
        case EOpSparseTextureGather:
            cracked.gather = true;
            break;
        case EOpTextureGatherOffset:
        case EOpSparseTextureGatherOffset:
            cracked.gather = true;
            cracked.offset = true;
            break;
        case EOpTextureGatherOffsets:
        case EOpSparseTextureGatherOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            break;
        case EOpTextureGatherLod:
        case EOpSparseTextureGatherLod:
            cracked.gather = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffset:
        case EOpSparseTextureGatherLodOffset:
            cracked.gather = true;
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffsets:
        case EOpSparseTextureGatherLodOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            cracked.lod = true;
            break;
        case EOpImageLoadLod:
        case EOpImageStoreLod:
        case EOpSparseImageLoadLod:
            cracked.lod = true;
            break;
        // AMD fragment-mask operations
        case EOpFragmentMaskFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpFragmentFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        // NV texture-footprint operations
        case EOpImageSampleFootprintNV:
            break;
        case EOpImageSampleFootprintClampNV:
            cracked.lodClamp = true;
            break;
        case EOpImageSampleFootprintLodNV:
            cracked.lod = true;
            break;
        case EOpImageSampleFootprintGradNV:
            cracked.grad = true;
            break;
        case EOpImageSampleFootprintGradClampNV:
            cracked.lodClamp = true;
            cracked.grad = true;
            break;
        case EOpSubpassLoad:
        case EOpSubpassLoadMS:
            cracked.subpass = true;
            break;
#endif
        default:
            break;
        }
    }

protected:
    // Result type defaults to float until setType() is called.
    TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
    TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
    TOperator op;
    // The result precision is in the inherited TType, and is usually meant to be both
    // the operation precision and the result precision. However, some more complex things,
    // like built-in function calls, distinguish between the two, in which case non-EqpNone
    // 'operationPrecision' overrides the result precision as far as operation precision
    // is concerned.
    TPrecisionQualifier operationPrecision;
};
1568
1569 //
1570 // Nodes for all the basic binary math operators.
1571 //
1572 class TIntermBinary : public TIntermOperator {
1573 public:
TIntermBinary(TOperator o)1574 TIntermBinary(TOperator o) : TIntermOperator(o) {}
1575 virtual void traverse(TIntermTraverser*);
setLeft(TIntermTyped * n)1576 virtual void setLeft(TIntermTyped* n) { left = n; }
setRight(TIntermTyped * n)1577 virtual void setRight(TIntermTyped* n) { right = n; }
getLeft()1578 virtual TIntermTyped* getLeft() const { return left; }
getRight()1579 virtual TIntermTyped* getRight() const { return right; }
getAsBinaryNode()1580 virtual TIntermBinary* getAsBinaryNode() { return this; }
getAsBinaryNode()1581 virtual const TIntermBinary* getAsBinaryNode() const { return this; }
1582 virtual void updatePrecision();
1583 protected:
1584 TIntermTyped* left;
1585 TIntermTyped* right;
1586 };
1587
1588 //
1589 // Nodes for unary math operators.
1590 //
1591 class TIntermUnary : public TIntermOperator {
1592 public:
TIntermUnary(TOperator o,TType & t)1593 TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {}
TIntermUnary(TOperator o)1594 TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {}
1595 virtual void traverse(TIntermTraverser*);
setOperand(TIntermTyped * o)1596 virtual void setOperand(TIntermTyped* o) { operand = o; }
getOperand()1597 virtual TIntermTyped* getOperand() { return operand; }
getOperand()1598 virtual const TIntermTyped* getOperand() const { return operand; }
getAsUnaryNode()1599 virtual TIntermUnary* getAsUnaryNode() { return this; }
getAsUnaryNode()1600 virtual const TIntermUnary* getAsUnaryNode() const { return this; }
1601 virtual void updatePrecision();
1602 protected:
1603 TIntermTyped* operand;
1604 };
1605
typedef TVector<TIntermNode*> TIntermSequence;      // ordered child list of a TIntermAggregate
typedef TVector<TStorageQualifier> TQualifierList;  // storage qualifiers, one per sequence entry (see TIntermAggregate)
1608 //
1609 // Nodes that operate on an arbitrary sized set of children.
1610 //
1611 class TIntermAggregate : public TIntermOperator {
1612 public:
TIntermAggregate()1613 TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
TIntermAggregate(TOperator o)1614 TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
~TIntermAggregate()1615 ~TIntermAggregate() { delete pragmaTable; }
getAsAggregate()1616 virtual TIntermAggregate* getAsAggregate() { return this; }
getAsAggregate()1617 virtual const TIntermAggregate* getAsAggregate() const { return this; }
setOperator(TOperator o)1618 virtual void setOperator(TOperator o) { op = o; }
getSequence()1619 virtual TIntermSequence& getSequence() { return sequence; }
getSequence()1620 virtual const TIntermSequence& getSequence() const { return sequence; }
setName(const TString & n)1621 virtual void setName(const TString& n) { name = n; }
getName()1622 virtual const TString& getName() const { return name; }
1623 virtual void traverse(TIntermTraverser*);
setUserDefined()1624 virtual void setUserDefined() { userDefined = true; }
isUserDefined()1625 virtual bool isUserDefined() { return userDefined; }
getQualifierList()1626 virtual TQualifierList& getQualifierList() { return qualifier; }
getQualifierList()1627 virtual const TQualifierList& getQualifierList() const { return qualifier; }
setOptimize(bool o)1628 void setOptimize(bool o) { optimize = o; }
setDebug(bool d)1629 void setDebug(bool d) { debug = d; }
getOptimize()1630 bool getOptimize() const { return optimize; }
getDebug()1631 bool getDebug() const { return debug; }
1632 void setPragmaTable(const TPragmaTable& pTable);
getPragmaTable()1633 const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
1634 protected:
1635 TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
1636 TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
1637 TIntermSequence sequence;
1638 TQualifierList qualifier;
1639 TString name;
1640 bool userDefined; // used for user defined function names
1641 bool optimize;
1642 bool debug;
1643 TPragmaTable* pragmaTable;
1644 };
1645
1646 //
1647 // For if tests.
1648 //
1649 class TIntermSelection : public TIntermTyped {
1650 public:
TIntermSelection(TIntermTyped * cond,TIntermNode * trueB,TIntermNode * falseB)1651 TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
1652 TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
1653 shortCircuit(true),
1654 flatten(false), dontFlatten(false) {}
TIntermSelection(TIntermTyped * cond,TIntermNode * trueB,TIntermNode * falseB,const TType & type)1655 TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
1656 TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
1657 shortCircuit(true),
1658 flatten(false), dontFlatten(false) {}
1659 virtual void traverse(TIntermTraverser*);
getCondition()1660 virtual TIntermTyped* getCondition() const { return condition; }
getTrueBlock()1661 virtual TIntermNode* getTrueBlock() const { return trueBlock; }
getFalseBlock()1662 virtual TIntermNode* getFalseBlock() const { return falseBlock; }
getAsSelectionNode()1663 virtual TIntermSelection* getAsSelectionNode() { return this; }
getAsSelectionNode()1664 virtual const TIntermSelection* getAsSelectionNode() const { return this; }
1665
setNoShortCircuit()1666 void setNoShortCircuit() { shortCircuit = false; }
getShortCircuit()1667 bool getShortCircuit() const { return shortCircuit; }
1668
setFlatten()1669 void setFlatten() { flatten = true; }
setDontFlatten()1670 void setDontFlatten() { dontFlatten = true; }
getFlatten()1671 bool getFlatten() const { return flatten; }
getDontFlatten()1672 bool getDontFlatten() const { return dontFlatten; }
1673
1674 protected:
1675 TIntermTyped* condition;
1676 TIntermNode* trueBlock;
1677 TIntermNode* falseBlock;
1678 bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
1679 bool flatten; // true if flatten requested
1680 bool dontFlatten; // true if requested to not flatten
1681 };
1682
1683 //
1684 // For switch statements. Designed use is that a switch will have sequence of nodes
1685 // that are either case/default nodes or a *single* node that represents all the code
1686 // in between (if any) consecutive case/defaults. So, a traversal need only deal with
1687 // 0 or 1 nodes per case/default statement.
1688 //
1689 class TIntermSwitch : public TIntermNode {
1690 public:
TIntermSwitch(TIntermTyped * cond,TIntermAggregate * b)1691 TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
1692 flatten(false), dontFlatten(false) {}
1693 virtual void traverse(TIntermTraverser*);
getCondition()1694 virtual TIntermNode* getCondition() const { return condition; }
getBody()1695 virtual TIntermAggregate* getBody() const { return body; }
getAsSwitchNode()1696 virtual TIntermSwitch* getAsSwitchNode() { return this; }
getAsSwitchNode()1697 virtual const TIntermSwitch* getAsSwitchNode() const { return this; }
1698
setFlatten()1699 void setFlatten() { flatten = true; }
setDontFlatten()1700 void setDontFlatten() { dontFlatten = true; }
getFlatten()1701 bool getFlatten() const { return flatten; }
getDontFlatten()1702 bool getDontFlatten() const { return dontFlatten; }
1703
1704 protected:
1705 TIntermTyped* condition;
1706 TIntermAggregate* body;
1707 bool flatten; // true if flatten requested
1708 bool dontFlatten; // true if requested to not flatten
1709 };
1710
// Traversal timing passed to the visit* callbacks of TIntermTraverser.
enum TVisit
{
    EvPreVisit,   // before the node's children are processed
    EvInVisit,    // between children, for nodes with multiple children
    EvPostVisit   // after all children have been processed
};
1717
1718 //
1719 // For traversing the tree. User should derive from this,
1720 // put their traversal specific data in it, and then pass
1721 // it to a Traverse method.
1722 //
1723 // When using this, just fill in the methods for nodes you want visited.
1724 // Return false from a pre-visit to skip visiting that node's subtree.
1725 //
1726 // Explicitly set postVisit to true if you want post visiting, otherwise,
1727 // filled in methods will only be called at pre-visit time (before processing
1728 // the subtree). Similarly for inVisit for in-order visiting of nodes with
1729 // multiple children.
1730 //
1731 // If you only want post-visits, explicitly turn off preVisit (and inVisit)
1732 // and turn on postVisit.
1733 //
1734 // In general, for the visit*() methods, return true from interior nodes
1735 // to have the traversal continue on to children.
1736 //
1737 // If you process children yourself, or don't want them processed, return false.
1738 //
class TIntermTraverser {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    // Which visit phases fire is fixed at construction; rightToLeft reverses
    // child order.
    TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
        preVisit(preVisit),
        inVisit(inVisit),
        postVisit(postVisit),
        rightToLeft(rightToLeft),
        depth(0),
        maxDepth(0) { }
    virtual ~TIntermTraverser() { }

    // Leaf-node visits: called once per node.
    virtual void visitSymbol(TIntermSymbol*)               { }
    virtual void visitConstantUnion(TIntermConstantUnion*) { }
    // Interior-node visits: return false to skip the node's subtree
    // (see class comment above for pre/in/post semantics).
    virtual bool visitBinary(TVisit, TIntermBinary*)       { return true; }
    virtual bool visitUnary(TVisit, TIntermUnary*)         { return true; }
    virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
    virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
    virtual bool visitLoop(TVisit, TIntermLoop*)           { return true; }
    virtual bool visitBranch(TVisit, TIntermBranch*)       { return true; }
    virtual bool visitSwitch(TVisit, TIntermSwitch*)       { return true; }

    // Deepest nesting level reached so far during traversal.
    int getMaxDepth() const { return maxDepth; }

    // Called by node traverse() implementations when descending into 'current'.
    void incrementDepth(TIntermNode *current)
    {
        depth++;
        maxDepth = (std::max)(maxDepth, depth);  // parenthesized to dodge a max() macro
        path.push_back(current);
    }

    // Must pair with a prior incrementDepth().
    void decrementDepth()
    {
        depth--;
        path.pop_back();
    }

    // Parent of the node currently being visited, or NULL at the root.
    TIntermNode *getParentNode()
    {
        return path.size() == 0 ? NULL : path.back();
    }

    const bool preVisit;
    const bool inVisit;
    const bool postVisit;
    const bool rightToLeft;

protected:
    TIntermTraverser& operator=(TIntermTraverser&);  // non-assignable (const members)

    int depth;     // current nesting level
    int maxDepth;  // maximum nesting level seen

    // All the nodes from root to the current node's parent during traversing.
    TVector<TIntermNode *> path;
};
1795
1796 // KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
1797 // sized with the same symbol, involving no operations"
SameSpecializationConstants(TIntermTyped * node1,TIntermTyped * node2)1798 inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
1799 {
1800 return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
1801 node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
1802 }
1803
1804 } // end namespace glslang
1805
1806 #endif // __INTERMEDIATE_H
1807