1 //
2 // Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
3 // Copyright (C) 2012-2016 LunarG, Inc.
4 // Copyright (C) 2017 ARM Limited.
5 // Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
6 //
7 // All rights reserved.
8 //
9 // Redistribution and use in source and binary forms, with or without
10 // modification, are permitted provided that the following conditions
11 // are met:
12 //
13 // Redistributions of source code must retain the above copyright
14 // notice, this list of conditions and the following disclaimer.
15 //
16 // Redistributions in binary form must reproduce the above
17 // copyright notice, this list of conditions and the following
18 // disclaimer in the documentation and/or other materials provided
19 // with the distribution.
20 //
21 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
22 // contributors may be used to endorse or promote products derived
23 // from this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
33 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
35 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 // POSSIBILITY OF SUCH DAMAGE.
37 //
38
39 //
40 // Definition of the in-memory high-level intermediate representation
41 // of shaders. This is a tree that parser creates.
42 //
43 // Nodes in the tree are defined as a hierarchy of classes derived from
44 // TIntermNode. Each is a node in a tree. There is no preset branching factor;
45 // each node can have it's own type of list of children.
46 //
47
48 #ifndef __INTERMEDIATE_H
49 #define __INTERMEDIATE_H
50
51 #include "Common.h"
52 #include "Types.h"
53 #include "ConstantUnion.h"
54
55 namespace glslang {
56
57 class TIntermediate;
58
59 //
60 // Operators used by the high-level (parse tree) representation.
61 //
62 enum TOperator {
63 EOpNull, // if in a node, should only mean a node is still being built
64 EOpSequence, // denotes a list of statements, or parameters, etc.
65 EOpScope, // Used by debugging to denote a scoped list of statements
66 EOpLinkerObjects, // for aggregate node of objects the linker may need, if not reference by the rest of the AST
67 EOpFunctionCall,
68 EOpFunction, // For function definition
69 EOpParameters, // an aggregate listing the parameters to a function
70 EOpSpirvInst,
71
72 //
73 // Unary operators
74 //
75
76 EOpNegative,
77 EOpLogicalNot,
78 EOpVectorLogicalNot,
79 EOpBitwiseNot,
80
81 EOpPostIncrement,
82 EOpPostDecrement,
83 EOpPreIncrement,
84 EOpPreDecrement,
85
86 EOpCopyObject,
87
88 EOpDeclare, // Used by debugging to force declaration of variable in correct scope
89
90 // (u)int* -> bool
91 EOpConvInt8ToBool,
92 EOpConvUint8ToBool,
93 EOpConvInt16ToBool,
94 EOpConvUint16ToBool,
95 EOpConvIntToBool,
96 EOpConvUintToBool,
97 EOpConvInt64ToBool,
98 EOpConvUint64ToBool,
99
100 // float* -> bool
101 EOpConvFloat16ToBool,
102 EOpConvFloatToBool,
103 EOpConvDoubleToBool,
104
105 // bool -> (u)int*
106 EOpConvBoolToInt8,
107 EOpConvBoolToUint8,
108 EOpConvBoolToInt16,
109 EOpConvBoolToUint16,
110 EOpConvBoolToInt,
111 EOpConvBoolToUint,
112 EOpConvBoolToInt64,
113 EOpConvBoolToUint64,
114
115 // bool -> float*
116 EOpConvBoolToFloat16,
117 EOpConvBoolToFloat,
118 EOpConvBoolToDouble,
119
120 // int8_t -> (u)int*
121 EOpConvInt8ToInt16,
122 EOpConvInt8ToInt,
123 EOpConvInt8ToInt64,
124 EOpConvInt8ToUint8,
125 EOpConvInt8ToUint16,
126 EOpConvInt8ToUint,
127 EOpConvInt8ToUint64,
128
129 // uint8_t -> (u)int*
130 EOpConvUint8ToInt8,
131 EOpConvUint8ToInt16,
132 EOpConvUint8ToInt,
133 EOpConvUint8ToInt64,
134 EOpConvUint8ToUint16,
135 EOpConvUint8ToUint,
136 EOpConvUint8ToUint64,
137
138 // int8_t -> float*
139 EOpConvInt8ToFloat16,
140 EOpConvInt8ToFloat,
141 EOpConvInt8ToDouble,
142
143 // uint8_t -> float*
144 EOpConvUint8ToFloat16,
145 EOpConvUint8ToFloat,
146 EOpConvUint8ToDouble,
147
148 // int16_t -> (u)int*
149 EOpConvInt16ToInt8,
150 EOpConvInt16ToInt,
151 EOpConvInt16ToInt64,
152 EOpConvInt16ToUint8,
153 EOpConvInt16ToUint16,
154 EOpConvInt16ToUint,
155 EOpConvInt16ToUint64,
156
157 // uint16_t -> (u)int*
158 EOpConvUint16ToInt8,
159 EOpConvUint16ToInt16,
160 EOpConvUint16ToInt,
161 EOpConvUint16ToInt64,
162 EOpConvUint16ToUint8,
163 EOpConvUint16ToUint,
164 EOpConvUint16ToUint64,
165
166 // int16_t -> float*
167 EOpConvInt16ToFloat16,
168 EOpConvInt16ToFloat,
169 EOpConvInt16ToDouble,
170
171 // uint16_t -> float*
172 EOpConvUint16ToFloat16,
173 EOpConvUint16ToFloat,
174 EOpConvUint16ToDouble,
175
176 // int32_t -> (u)int*
177 EOpConvIntToInt8,
178 EOpConvIntToInt16,
179 EOpConvIntToInt64,
180 EOpConvIntToUint8,
181 EOpConvIntToUint16,
182 EOpConvIntToUint,
183 EOpConvIntToUint64,
184
185 // uint32_t -> (u)int*
186 EOpConvUintToInt8,
187 EOpConvUintToInt16,
188 EOpConvUintToInt,
189 EOpConvUintToInt64,
190 EOpConvUintToUint8,
191 EOpConvUintToUint16,
192 EOpConvUintToUint64,
193
194 // int32_t -> float*
195 EOpConvIntToFloat16,
196 EOpConvIntToFloat,
197 EOpConvIntToDouble,
198
199 // uint32_t -> float*
200 EOpConvUintToFloat16,
201 EOpConvUintToFloat,
202 EOpConvUintToDouble,
203
204 // int64_t -> (u)int*
205 EOpConvInt64ToInt8,
206 EOpConvInt64ToInt16,
207 EOpConvInt64ToInt,
208 EOpConvInt64ToUint8,
209 EOpConvInt64ToUint16,
210 EOpConvInt64ToUint,
211 EOpConvInt64ToUint64,
212
213 // uint64_t -> (u)int*
214 EOpConvUint64ToInt8,
215 EOpConvUint64ToInt16,
216 EOpConvUint64ToInt,
217 EOpConvUint64ToInt64,
218 EOpConvUint64ToUint8,
219 EOpConvUint64ToUint16,
220 EOpConvUint64ToUint,
221
222 // int64_t -> float*
223 EOpConvInt64ToFloat16,
224 EOpConvInt64ToFloat,
225 EOpConvInt64ToDouble,
226
227 // uint64_t -> float*
228 EOpConvUint64ToFloat16,
229 EOpConvUint64ToFloat,
230 EOpConvUint64ToDouble,
231
232 // float16_t -> (u)int*
233 EOpConvFloat16ToInt8,
234 EOpConvFloat16ToInt16,
235 EOpConvFloat16ToInt,
236 EOpConvFloat16ToInt64,
237 EOpConvFloat16ToUint8,
238 EOpConvFloat16ToUint16,
239 EOpConvFloat16ToUint,
240 EOpConvFloat16ToUint64,
241
242 // float16_t -> float*
243 EOpConvFloat16ToFloat,
244 EOpConvFloat16ToDouble,
245
246 // float -> (u)int*
247 EOpConvFloatToInt8,
248 EOpConvFloatToInt16,
249 EOpConvFloatToInt,
250 EOpConvFloatToInt64,
251 EOpConvFloatToUint8,
252 EOpConvFloatToUint16,
253 EOpConvFloatToUint,
254 EOpConvFloatToUint64,
255
256 // float -> float*
257 EOpConvFloatToFloat16,
258 EOpConvFloatToDouble,
259
260 // float64 _t-> (u)int*
261 EOpConvDoubleToInt8,
262 EOpConvDoubleToInt16,
263 EOpConvDoubleToInt,
264 EOpConvDoubleToInt64,
265 EOpConvDoubleToUint8,
266 EOpConvDoubleToUint16,
267 EOpConvDoubleToUint,
268 EOpConvDoubleToUint64,
269
270 // float64_t -> float*
271 EOpConvDoubleToFloat16,
272 EOpConvDoubleToFloat,
273
274 // uint64_t <-> pointer
275 EOpConvUint64ToPtr,
276 EOpConvPtrToUint64,
277
278 // uvec2 <-> pointer
279 EOpConvUvec2ToPtr,
280 EOpConvPtrToUvec2,
281
282 // uint64_t -> accelerationStructureEXT
283 EOpConvUint64ToAccStruct,
284
285 // uvec2 -> accelerationStructureEXT
286 EOpConvUvec2ToAccStruct,
287
288 //
289 // binary operations
290 //
291
292 EOpAdd,
293 EOpSub,
294 EOpMul,
295 EOpDiv,
296 EOpMod,
297 EOpRightShift,
298 EOpLeftShift,
299 EOpAnd,
300 EOpInclusiveOr,
301 EOpExclusiveOr,
302 EOpEqual,
303 EOpNotEqual,
304 EOpVectorEqual,
305 EOpVectorNotEqual,
306 EOpLessThan,
307 EOpGreaterThan,
308 EOpLessThanEqual,
309 EOpGreaterThanEqual,
310 EOpComma,
311
312 EOpVectorTimesScalar,
313 EOpVectorTimesMatrix,
314 EOpMatrixTimesVector,
315 EOpMatrixTimesScalar,
316
317 EOpLogicalOr,
318 EOpLogicalXor,
319 EOpLogicalAnd,
320
321 EOpIndexDirect,
322 EOpIndexIndirect,
323 EOpIndexDirectStruct,
324
325 EOpVectorSwizzle,
326
327 EOpMethod,
328 EOpScoping,
329
330 //
331 // Built-in functions mapped to operators
332 //
333
334 EOpRadians,
335 EOpDegrees,
336 EOpSin,
337 EOpCos,
338 EOpTan,
339 EOpAsin,
340 EOpAcos,
341 EOpAtan,
342 EOpSinh,
343 EOpCosh,
344 EOpTanh,
345 EOpAsinh,
346 EOpAcosh,
347 EOpAtanh,
348
349 EOpPow,
350 EOpExp,
351 EOpLog,
352 EOpExp2,
353 EOpLog2,
354 EOpSqrt,
355 EOpInverseSqrt,
356
357 EOpAbs,
358 EOpSign,
359 EOpFloor,
360 EOpTrunc,
361 EOpRound,
362 EOpRoundEven,
363 EOpCeil,
364 EOpFract,
365 EOpModf,
366 EOpMin,
367 EOpMax,
368 EOpClamp,
369 EOpMix,
370 EOpStep,
371 EOpSmoothStep,
372
373 EOpIsNan,
374 EOpIsInf,
375
376 EOpFma,
377
378 EOpFrexp,
379 EOpLdexp,
380
381 EOpFloatBitsToInt,
382 EOpFloatBitsToUint,
383 EOpIntBitsToFloat,
384 EOpUintBitsToFloat,
385 EOpDoubleBitsToInt64,
386 EOpDoubleBitsToUint64,
387 EOpInt64BitsToDouble,
388 EOpUint64BitsToDouble,
389 EOpFloat16BitsToInt16,
390 EOpFloat16BitsToUint16,
391 EOpInt16BitsToFloat16,
392 EOpUint16BitsToFloat16,
393 EOpPackSnorm2x16,
394 EOpUnpackSnorm2x16,
395 EOpPackUnorm2x16,
396 EOpUnpackUnorm2x16,
397 EOpPackSnorm4x8,
398 EOpUnpackSnorm4x8,
399 EOpPackUnorm4x8,
400 EOpUnpackUnorm4x8,
401 EOpPackHalf2x16,
402 EOpUnpackHalf2x16,
403 EOpPackDouble2x32,
404 EOpUnpackDouble2x32,
405 EOpPackInt2x32,
406 EOpUnpackInt2x32,
407 EOpPackUint2x32,
408 EOpUnpackUint2x32,
409 EOpPackFloat2x16,
410 EOpUnpackFloat2x16,
411 EOpPackInt2x16,
412 EOpUnpackInt2x16,
413 EOpPackUint2x16,
414 EOpUnpackUint2x16,
415 EOpPackInt4x16,
416 EOpUnpackInt4x16,
417 EOpPackUint4x16,
418 EOpUnpackUint4x16,
419 EOpPack16,
420 EOpPack32,
421 EOpPack64,
422 EOpUnpack32,
423 EOpUnpack16,
424 EOpUnpack8,
425
426 EOpLength,
427 EOpDistance,
428 EOpDot,
429 EOpCross,
430 EOpNormalize,
431 EOpFaceForward,
432 EOpReflect,
433 EOpRefract,
434
435 EOpMin3,
436 EOpMax3,
437 EOpMid3,
438
439 EOpDPdx, // Fragment only
440 EOpDPdy, // Fragment only
441 EOpFwidth, // Fragment only
442 EOpDPdxFine, // Fragment only
443 EOpDPdyFine, // Fragment only
444 EOpFwidthFine, // Fragment only
445 EOpDPdxCoarse, // Fragment only
446 EOpDPdyCoarse, // Fragment only
447 EOpFwidthCoarse, // Fragment only
448
449 EOpInterpolateAtCentroid, // Fragment only
450 EOpInterpolateAtSample, // Fragment only
451 EOpInterpolateAtOffset, // Fragment only
452 EOpInterpolateAtVertex,
453
454 EOpMatrixTimesMatrix,
455 EOpOuterProduct,
456 EOpDeterminant,
457 EOpMatrixInverse,
458 EOpTranspose,
459
460 EOpFtransform,
461
462 EOpNoise,
463
464 EOpEmitVertex, // geometry only
465 EOpEndPrimitive, // geometry only
466 EOpEmitStreamVertex, // geometry only
467 EOpEndStreamPrimitive, // geometry only
468
469 EOpBarrier,
470 EOpMemoryBarrier,
471 EOpMemoryBarrierAtomicCounter,
472 EOpMemoryBarrierBuffer,
473 EOpMemoryBarrierImage,
474 EOpMemoryBarrierShared, // compute only
475 EOpGroupMemoryBarrier, // compute only
476
477 EOpBallot,
478 EOpReadInvocation,
479 EOpReadFirstInvocation,
480
481 EOpAnyInvocation,
482 EOpAllInvocations,
483 EOpAllInvocationsEqual,
484
485 EOpSubgroupGuardStart,
486 EOpSubgroupBarrier,
487 EOpSubgroupMemoryBarrier,
488 EOpSubgroupMemoryBarrierBuffer,
489 EOpSubgroupMemoryBarrierImage,
490 EOpSubgroupMemoryBarrierShared, // compute only
491 EOpSubgroupElect,
492 EOpSubgroupAll,
493 EOpSubgroupAny,
494 EOpSubgroupAllEqual,
495 EOpSubgroupBroadcast,
496 EOpSubgroupBroadcastFirst,
497 EOpSubgroupBallot,
498 EOpSubgroupInverseBallot,
499 EOpSubgroupBallotBitExtract,
500 EOpSubgroupBallotBitCount,
501 EOpSubgroupBallotInclusiveBitCount,
502 EOpSubgroupBallotExclusiveBitCount,
503 EOpSubgroupBallotFindLSB,
504 EOpSubgroupBallotFindMSB,
505 EOpSubgroupShuffle,
506 EOpSubgroupShuffleXor,
507 EOpSubgroupShuffleUp,
508 EOpSubgroupShuffleDown,
509 EOpSubgroupAdd,
510 EOpSubgroupMul,
511 EOpSubgroupMin,
512 EOpSubgroupMax,
513 EOpSubgroupAnd,
514 EOpSubgroupOr,
515 EOpSubgroupXor,
516 EOpSubgroupInclusiveAdd,
517 EOpSubgroupInclusiveMul,
518 EOpSubgroupInclusiveMin,
519 EOpSubgroupInclusiveMax,
520 EOpSubgroupInclusiveAnd,
521 EOpSubgroupInclusiveOr,
522 EOpSubgroupInclusiveXor,
523 EOpSubgroupExclusiveAdd,
524 EOpSubgroupExclusiveMul,
525 EOpSubgroupExclusiveMin,
526 EOpSubgroupExclusiveMax,
527 EOpSubgroupExclusiveAnd,
528 EOpSubgroupExclusiveOr,
529 EOpSubgroupExclusiveXor,
530 EOpSubgroupClusteredAdd,
531 EOpSubgroupClusteredMul,
532 EOpSubgroupClusteredMin,
533 EOpSubgroupClusteredMax,
534 EOpSubgroupClusteredAnd,
535 EOpSubgroupClusteredOr,
536 EOpSubgroupClusteredXor,
537 EOpSubgroupQuadBroadcast,
538 EOpSubgroupQuadSwapHorizontal,
539 EOpSubgroupQuadSwapVertical,
540 EOpSubgroupQuadSwapDiagonal,
541
542 EOpSubgroupPartition,
543 EOpSubgroupPartitionedAdd,
544 EOpSubgroupPartitionedMul,
545 EOpSubgroupPartitionedMin,
546 EOpSubgroupPartitionedMax,
547 EOpSubgroupPartitionedAnd,
548 EOpSubgroupPartitionedOr,
549 EOpSubgroupPartitionedXor,
550 EOpSubgroupPartitionedInclusiveAdd,
551 EOpSubgroupPartitionedInclusiveMul,
552 EOpSubgroupPartitionedInclusiveMin,
553 EOpSubgroupPartitionedInclusiveMax,
554 EOpSubgroupPartitionedInclusiveAnd,
555 EOpSubgroupPartitionedInclusiveOr,
556 EOpSubgroupPartitionedInclusiveXor,
557 EOpSubgroupPartitionedExclusiveAdd,
558 EOpSubgroupPartitionedExclusiveMul,
559 EOpSubgroupPartitionedExclusiveMin,
560 EOpSubgroupPartitionedExclusiveMax,
561 EOpSubgroupPartitionedExclusiveAnd,
562 EOpSubgroupPartitionedExclusiveOr,
563 EOpSubgroupPartitionedExclusiveXor,
564
565 EOpSubgroupGuardStop,
566
567 EOpMinInvocations,
568 EOpMaxInvocations,
569 EOpAddInvocations,
570 EOpMinInvocationsNonUniform,
571 EOpMaxInvocationsNonUniform,
572 EOpAddInvocationsNonUniform,
573 EOpMinInvocationsInclusiveScan,
574 EOpMaxInvocationsInclusiveScan,
575 EOpAddInvocationsInclusiveScan,
576 EOpMinInvocationsInclusiveScanNonUniform,
577 EOpMaxInvocationsInclusiveScanNonUniform,
578 EOpAddInvocationsInclusiveScanNonUniform,
579 EOpMinInvocationsExclusiveScan,
580 EOpMaxInvocationsExclusiveScan,
581 EOpAddInvocationsExclusiveScan,
582 EOpMinInvocationsExclusiveScanNonUniform,
583 EOpMaxInvocationsExclusiveScanNonUniform,
584 EOpAddInvocationsExclusiveScanNonUniform,
585 EOpSwizzleInvocations,
586 EOpSwizzleInvocationsMasked,
587 EOpWriteInvocation,
588 EOpMbcnt,
589
590 EOpCubeFaceIndex,
591 EOpCubeFaceCoord,
592 EOpTime,
593
594 EOpAtomicAdd,
595 EOpAtomicSubtract,
596 EOpAtomicMin,
597 EOpAtomicMax,
598 EOpAtomicAnd,
599 EOpAtomicOr,
600 EOpAtomicXor,
601 EOpAtomicExchange,
602 EOpAtomicCompSwap,
603 EOpAtomicLoad,
604 EOpAtomicStore,
605
606 EOpAtomicCounterIncrement, // results in pre-increment value
607 EOpAtomicCounterDecrement, // results in post-decrement value
608 EOpAtomicCounter,
609 EOpAtomicCounterAdd,
610 EOpAtomicCounterSubtract,
611 EOpAtomicCounterMin,
612 EOpAtomicCounterMax,
613 EOpAtomicCounterAnd,
614 EOpAtomicCounterOr,
615 EOpAtomicCounterXor,
616 EOpAtomicCounterExchange,
617 EOpAtomicCounterCompSwap,
618
619 EOpAny,
620 EOpAll,
621
622 EOpCooperativeMatrixLoad,
623 EOpCooperativeMatrixStore,
624 EOpCooperativeMatrixMulAdd,
625 EOpCooperativeMatrixLoadNV,
626 EOpCooperativeMatrixStoreNV,
627 EOpCooperativeMatrixMulAddNV,
628
629 EOpBeginInvocationInterlock, // Fragment only
630 EOpEndInvocationInterlock, // Fragment only
631
632 EOpIsHelperInvocation,
633
634 EOpDebugPrintf,
635
636 //
637 // Branch
638 //
639
640 EOpKill, // Fragment only
641 EOpTerminateInvocation, // Fragment only
642 EOpDemote, // Fragment only
643 EOpTerminateRayKHR, // Any-hit only
644 EOpIgnoreIntersectionKHR, // Any-hit only
645 EOpReturn,
646 EOpBreak,
647 EOpContinue,
648 EOpCase,
649 EOpDefault,
650
651 //
652 // Constructors
653 //
654
655 EOpConstructGuardStart,
656 EOpConstructInt, // these first scalar forms also identify what implicit conversion is needed
657 EOpConstructUint,
658 EOpConstructInt8,
659 EOpConstructUint8,
660 EOpConstructInt16,
661 EOpConstructUint16,
662 EOpConstructInt64,
663 EOpConstructUint64,
664 EOpConstructBool,
665 EOpConstructFloat,
666 EOpConstructDouble,
667 // Keep vector and matrix constructors in a consistent relative order for
668 // TParseContext::constructBuiltIn, which converts between 8/16/32 bit
669 // vector constructors
670 EOpConstructVec2,
671 EOpConstructVec3,
672 EOpConstructVec4,
673 EOpConstructMat2x2,
674 EOpConstructMat2x3,
675 EOpConstructMat2x4,
676 EOpConstructMat3x2,
677 EOpConstructMat3x3,
678 EOpConstructMat3x4,
679 EOpConstructMat4x2,
680 EOpConstructMat4x3,
681 EOpConstructMat4x4,
682 EOpConstructDVec2,
683 EOpConstructDVec3,
684 EOpConstructDVec4,
685 EOpConstructBVec2,
686 EOpConstructBVec3,
687 EOpConstructBVec4,
688 EOpConstructI8Vec2,
689 EOpConstructI8Vec3,
690 EOpConstructI8Vec4,
691 EOpConstructU8Vec2,
692 EOpConstructU8Vec3,
693 EOpConstructU8Vec4,
694 EOpConstructI16Vec2,
695 EOpConstructI16Vec3,
696 EOpConstructI16Vec4,
697 EOpConstructU16Vec2,
698 EOpConstructU16Vec3,
699 EOpConstructU16Vec4,
700 EOpConstructIVec2,
701 EOpConstructIVec3,
702 EOpConstructIVec4,
703 EOpConstructUVec2,
704 EOpConstructUVec3,
705 EOpConstructUVec4,
706 EOpConstructI64Vec2,
707 EOpConstructI64Vec3,
708 EOpConstructI64Vec4,
709 EOpConstructU64Vec2,
710 EOpConstructU64Vec3,
711 EOpConstructU64Vec4,
712 EOpConstructDMat2x2,
713 EOpConstructDMat2x3,
714 EOpConstructDMat2x4,
715 EOpConstructDMat3x2,
716 EOpConstructDMat3x3,
717 EOpConstructDMat3x4,
718 EOpConstructDMat4x2,
719 EOpConstructDMat4x3,
720 EOpConstructDMat4x4,
721 EOpConstructIMat2x2,
722 EOpConstructIMat2x3,
723 EOpConstructIMat2x4,
724 EOpConstructIMat3x2,
725 EOpConstructIMat3x3,
726 EOpConstructIMat3x4,
727 EOpConstructIMat4x2,
728 EOpConstructIMat4x3,
729 EOpConstructIMat4x4,
730 EOpConstructUMat2x2,
731 EOpConstructUMat2x3,
732 EOpConstructUMat2x4,
733 EOpConstructUMat3x2,
734 EOpConstructUMat3x3,
735 EOpConstructUMat3x4,
736 EOpConstructUMat4x2,
737 EOpConstructUMat4x3,
738 EOpConstructUMat4x4,
739 EOpConstructBMat2x2,
740 EOpConstructBMat2x3,
741 EOpConstructBMat2x4,
742 EOpConstructBMat3x2,
743 EOpConstructBMat3x3,
744 EOpConstructBMat3x4,
745 EOpConstructBMat4x2,
746 EOpConstructBMat4x3,
747 EOpConstructBMat4x4,
748 EOpConstructFloat16,
749 EOpConstructF16Vec2,
750 EOpConstructF16Vec3,
751 EOpConstructF16Vec4,
752 EOpConstructF16Mat2x2,
753 EOpConstructF16Mat2x3,
754 EOpConstructF16Mat2x4,
755 EOpConstructF16Mat3x2,
756 EOpConstructF16Mat3x3,
757 EOpConstructF16Mat3x4,
758 EOpConstructF16Mat4x2,
759 EOpConstructF16Mat4x3,
760 EOpConstructF16Mat4x4,
761 EOpConstructStruct,
762 EOpConstructTextureSampler,
763 EOpConstructNonuniform, // expected to be transformed away, not present in final AST
764 EOpConstructReference,
765 EOpConstructCooperativeMatrixNV,
766 EOpConstructCooperativeMatrixKHR,
767 EOpConstructAccStruct,
768 EOpConstructGuardEnd,
769
770 //
771 // moves
772 //
773
774 EOpAssign,
775 EOpAddAssign,
776 EOpSubAssign,
777 EOpMulAssign,
778 EOpVectorTimesMatrixAssign,
779 EOpVectorTimesScalarAssign,
780 EOpMatrixTimesScalarAssign,
781 EOpMatrixTimesMatrixAssign,
782 EOpDivAssign,
783 EOpModAssign,
784 EOpAndAssign,
785 EOpInclusiveOrAssign,
786 EOpExclusiveOrAssign,
787 EOpLeftShiftAssign,
788 EOpRightShiftAssign,
789
790 //
791 // Array operators
792 //
793
794 // Can apply to arrays, vectors, or matrices.
795 // Can be decomposed to a constant at compile time, but this does not always happen,
796 // due to link-time effects. So, consumer can expect either a link-time sized or
797 // run-time sized array.
798 EOpArrayLength,
799
800 //
801 // Image operations
802 //
803
804 EOpImageGuardBegin,
805
806 EOpImageQuerySize,
807 EOpImageQuerySamples,
808 EOpImageLoad,
809 EOpImageStore,
810 EOpImageLoadLod,
811 EOpImageStoreLod,
812 EOpImageAtomicAdd,
813 EOpImageAtomicMin,
814 EOpImageAtomicMax,
815 EOpImageAtomicAnd,
816 EOpImageAtomicOr,
817 EOpImageAtomicXor,
818 EOpImageAtomicExchange,
819 EOpImageAtomicCompSwap,
820 EOpImageAtomicLoad,
821 EOpImageAtomicStore,
822
823 EOpSubpassLoad,
824 EOpSubpassLoadMS,
825 EOpSparseImageLoad,
826 EOpSparseImageLoadLod,
827 EOpColorAttachmentReadEXT, // Fragment only
828
829 EOpImageGuardEnd,
830
831 //
832 // Texture operations
833 //
834
835 EOpTextureGuardBegin,
836
837 EOpTextureQuerySize,
838 EOpTextureQueryLod,
839 EOpTextureQueryLevels,
840 EOpTextureQuerySamples,
841
842 EOpSamplingGuardBegin,
843
844 EOpTexture,
845 EOpTextureProj,
846 EOpTextureLod,
847 EOpTextureOffset,
848 EOpTextureFetch,
849 EOpTextureFetchOffset,
850 EOpTextureProjOffset,
851 EOpTextureLodOffset,
852 EOpTextureProjLod,
853 EOpTextureProjLodOffset,
854 EOpTextureGrad,
855 EOpTextureGradOffset,
856 EOpTextureProjGrad,
857 EOpTextureProjGradOffset,
858 EOpTextureGather,
859 EOpTextureGatherOffset,
860 EOpTextureGatherOffsets,
861 EOpTextureClamp,
862 EOpTextureOffsetClamp,
863 EOpTextureGradClamp,
864 EOpTextureGradOffsetClamp,
865 EOpTextureGatherLod,
866 EOpTextureGatherLodOffset,
867 EOpTextureGatherLodOffsets,
868 EOpFragmentMaskFetch,
869 EOpFragmentFetch,
870
871 EOpSparseTextureGuardBegin,
872
873 EOpSparseTexture,
874 EOpSparseTextureLod,
875 EOpSparseTextureOffset,
876 EOpSparseTextureFetch,
877 EOpSparseTextureFetchOffset,
878 EOpSparseTextureLodOffset,
879 EOpSparseTextureGrad,
880 EOpSparseTextureGradOffset,
881 EOpSparseTextureGather,
882 EOpSparseTextureGatherOffset,
883 EOpSparseTextureGatherOffsets,
884 EOpSparseTexelsResident,
885 EOpSparseTextureClamp,
886 EOpSparseTextureOffsetClamp,
887 EOpSparseTextureGradClamp,
888 EOpSparseTextureGradOffsetClamp,
889 EOpSparseTextureGatherLod,
890 EOpSparseTextureGatherLodOffset,
891 EOpSparseTextureGatherLodOffsets,
892
893 EOpSparseTextureGuardEnd,
894
895 EOpImageFootprintGuardBegin,
896 EOpImageSampleFootprintNV,
897 EOpImageSampleFootprintClampNV,
898 EOpImageSampleFootprintLodNV,
899 EOpImageSampleFootprintGradNV,
900 EOpImageSampleFootprintGradClampNV,
901 EOpImageFootprintGuardEnd,
902 EOpSamplingGuardEnd,
903 EOpTextureGuardEnd,
904
905 //
906 // Integer operations
907 //
908
909 EOpAddCarry,
910 EOpSubBorrow,
911 EOpUMulExtended,
912 EOpIMulExtended,
913 EOpBitfieldExtract,
914 EOpBitfieldInsert,
915 EOpBitFieldReverse,
916 EOpBitCount,
917 EOpFindLSB,
918 EOpFindMSB,
919
920 EOpCountLeadingZeros,
921 EOpCountTrailingZeros,
922 EOpAbsDifference,
923 EOpAddSaturate,
924 EOpSubSaturate,
925 EOpAverage,
926 EOpAverageRounded,
927 EOpMul32x16,
928
929 EOpTraceNV,
930 EOpTraceRayMotionNV,
931 EOpTraceKHR,
932 EOpReportIntersection,
933 EOpIgnoreIntersectionNV,
934 EOpTerminateRayNV,
935 EOpExecuteCallableNV,
936 EOpExecuteCallableKHR,
937 EOpWritePackedPrimitiveIndices4x8NV,
938 EOpEmitMeshTasksEXT,
939 EOpSetMeshOutputsEXT,
940
941 //
942 // GL_EXT_ray_query operations
943 //
944
945 EOpRayQueryInitialize,
946 EOpRayQueryTerminate,
947 EOpRayQueryGenerateIntersection,
948 EOpRayQueryConfirmIntersection,
949 EOpRayQueryProceed,
950 EOpRayQueryGetIntersectionType,
951 EOpRayQueryGetRayTMin,
952 EOpRayQueryGetRayFlags,
953 EOpRayQueryGetIntersectionT,
954 EOpRayQueryGetIntersectionInstanceCustomIndex,
955 EOpRayQueryGetIntersectionInstanceId,
956 EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset,
957 EOpRayQueryGetIntersectionGeometryIndex,
958 EOpRayQueryGetIntersectionPrimitiveIndex,
959 EOpRayQueryGetIntersectionBarycentrics,
960 EOpRayQueryGetIntersectionFrontFace,
961 EOpRayQueryGetIntersectionCandidateAABBOpaque,
962 EOpRayQueryGetIntersectionObjectRayDirection,
963 EOpRayQueryGetIntersectionObjectRayOrigin,
964 EOpRayQueryGetWorldRayDirection,
965 EOpRayQueryGetWorldRayOrigin,
966 EOpRayQueryGetIntersectionObjectToWorld,
967 EOpRayQueryGetIntersectionWorldToObject,
968
969 //
970 // GL_NV_shader_invocation_reorder
971 //
972
973 EOpHitObjectTraceRayNV,
974 EOpHitObjectTraceRayMotionNV,
975 EOpHitObjectRecordHitNV,
976 EOpHitObjectRecordHitMotionNV,
977 EOpHitObjectRecordHitWithIndexNV,
978 EOpHitObjectRecordHitWithIndexMotionNV,
979 EOpHitObjectRecordMissNV,
980 EOpHitObjectRecordMissMotionNV,
981 EOpHitObjectRecordEmptyNV,
982 EOpHitObjectExecuteShaderNV,
983 EOpHitObjectIsEmptyNV,
984 EOpHitObjectIsMissNV,
985 EOpHitObjectIsHitNV,
986 EOpHitObjectGetRayTMinNV,
987 EOpHitObjectGetRayTMaxNV,
988 EOpHitObjectGetObjectRayOriginNV,
989 EOpHitObjectGetObjectRayDirectionNV,
990 EOpHitObjectGetWorldRayOriginNV,
991 EOpHitObjectGetWorldRayDirectionNV,
992 EOpHitObjectGetWorldToObjectNV,
993 EOpHitObjectGetObjectToWorldNV,
994 EOpHitObjectGetInstanceCustomIndexNV,
995 EOpHitObjectGetInstanceIdNV,
996 EOpHitObjectGetGeometryIndexNV,
997 EOpHitObjectGetPrimitiveIndexNV,
998 EOpHitObjectGetHitKindNV,
999 EOpHitObjectGetShaderBindingTableRecordIndexNV,
1000 EOpHitObjectGetShaderRecordBufferHandleNV,
1001 EOpHitObjectGetAttributesNV,
1002 EOpHitObjectGetCurrentTimeNV,
1003 EOpReorderThreadNV,
1004 EOpFetchMicroTriangleVertexPositionNV,
1005 EOpFetchMicroTriangleVertexBarycentricNV,
1006
1007 // HLSL operations
1008 //
1009
1010 EOpClip, // discard if input value < 0
1011 EOpIsFinite,
1012 EOpLog10, // base 10 log
1013 EOpRcp, // 1/x
1014 EOpSaturate, // clamp from 0 to 1
1015 EOpSinCos, // sin and cos in out parameters
1016 EOpGenMul, // mul(x,y) on any of mat/vec/scalars
1017 EOpDst, // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
1018 EOpInterlockedAdd, // atomic ops, but uses [optional] out arg instead of return
1019 EOpInterlockedAnd, // ...
1020 EOpInterlockedCompareExchange, // ...
1021 EOpInterlockedCompareStore, // ...
1022 EOpInterlockedExchange, // ...
1023 EOpInterlockedMax, // ...
1024 EOpInterlockedMin, // ...
1025 EOpInterlockedOr, // ...
1026 EOpInterlockedXor, // ...
1027 EOpAllMemoryBarrierWithGroupSync, // memory barriers without non-hlsl AST equivalents
1028 EOpDeviceMemoryBarrier, // ...
1029 EOpDeviceMemoryBarrierWithGroupSync, // ...
1030 EOpWorkgroupMemoryBarrier, // ...
1031 EOpWorkgroupMemoryBarrierWithGroupSync, // ...
1032 EOpEvaluateAttributeSnapped, // InterpolateAtOffset with int position on 16x16 grid
1033 EOpF32tof16, // HLSL conversion: half of a PackHalf2x16
1034 EOpF16tof32, // HLSL conversion: half of an UnpackHalf2x16
1035 EOpLit, // HLSL lighting coefficient vector
1036 EOpTextureBias, // HLSL texture bias: will be lowered to EOpTexture
1037 EOpAsDouble, // slightly different from EOpUint64BitsToDouble
1038 EOpD3DCOLORtoUBYTE4, // convert and swizzle 4-component color to UBYTE4 range
1039
1040 EOpMethodSample, // Texture object methods. These are translated to existing
1041 EOpMethodSampleBias, // AST methods, and exist to represent HLSL semantics until that
1042 EOpMethodSampleCmp, // translation is performed. See HlslParseContext::decomposeSampleMethods().
1043 EOpMethodSampleCmpLevelZero, // ...
1044 EOpMethodSampleGrad, // ...
1045 EOpMethodSampleLevel, // ...
1046 EOpMethodLoad, // ...
1047 EOpMethodGetDimensions, // ...
1048 EOpMethodGetSamplePosition, // ...
1049 EOpMethodGather, // ...
1050 EOpMethodCalculateLevelOfDetail, // ...
1051 EOpMethodCalculateLevelOfDetailUnclamped, // ...
1052
1053 // Load already defined above for textures
1054 EOpMethodLoad2, // Structure buffer object methods. These are translated to existing
1055 EOpMethodLoad3, // AST methods, and exist to represent HLSL semantics until that
1056 EOpMethodLoad4, // translation is performed. See HlslParseContext::decomposeSampleMethods().
1057 EOpMethodStore, // ...
1058 EOpMethodStore2, // ...
1059 EOpMethodStore3, // ...
1060 EOpMethodStore4, // ...
1061 EOpMethodIncrementCounter, // ...
1062 EOpMethodDecrementCounter, // ...
1063 // EOpMethodAppend is defined for geo shaders below
1064 EOpMethodConsume,
1065
1066 // SM5 texture methods
1067 EOpMethodGatherRed, // These are covered under the above EOpMethodSample comment about
1068 EOpMethodGatherGreen, // translation to existing AST opcodes. They exist temporarily
1069 EOpMethodGatherBlue, // because HLSL arguments are slightly different.
1070 EOpMethodGatherAlpha, // ...
1071 EOpMethodGatherCmp, // ...
1072 EOpMethodGatherCmpRed, // ...
1073 EOpMethodGatherCmpGreen, // ...
1074 EOpMethodGatherCmpBlue, // ...
1075 EOpMethodGatherCmpAlpha, // ...
1076
1077 // geometry methods
1078 EOpMethodAppend, // Geometry shader methods
1079 EOpMethodRestartStrip, // ...
1080
1081 // matrix
1082 EOpMatrixSwizzle, // select multiple matrix components (non-column)
1083
1084 // SM6 wave ops
1085 EOpWaveGetLaneCount, // Will decompose to gl_SubgroupSize.
1086 EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID.
1087 EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()).
1088 EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
1089
1090 // Shader Clock Ops
1091 EOpReadClockSubgroupKHR,
1092 EOpReadClockDeviceKHR,
1093
1094 // GL_EXT_ray_tracing_position_fetch
1095 EOpRayQueryGetIntersectionTriangleVertexPositionsEXT,
1096
1097 // Shader tile image ops
1098 EOpStencilAttachmentReadEXT, // Fragment only
1099 EOpDepthAttachmentReadEXT, // Fragment only
1100
1101 // Image processing
1102 EOpImageSampleWeightedQCOM,
1103 EOpImageBoxFilterQCOM,
1104 EOpImageBlockMatchSADQCOM,
1105 EOpImageBlockMatchSSDQCOM,
1106 };
1107
1108 enum TLinkType {
1109 ELinkNone,
1110 ELinkExport,
1111 };
1112
1113 class TIntermTraverser;
1114 class TIntermOperator;
1115 class TIntermAggregate;
1116 class TIntermUnary;
1117 class TIntermBinary;
1118 class TIntermConstantUnion;
1119 class TIntermSelection;
1120 class TIntermSwitch;
1121 class TIntermBranch;
1122 class TIntermTyped;
1123 class TIntermMethod;
1124 class TIntermSymbol;
1125 class TIntermLoop;
1126
1127 } // end namespace glslang
1128
1129 //
1130 // Base class for the tree nodes
1131 //
1132 // (Put outside the glslang namespace, as it's used as part of the external interface.)
1133 //
1134 class TIntermNode {
1135 public:
POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator ())1136 POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
1137
1138 TIntermNode() { loc.init(); }
getLoc()1139 virtual const glslang::TSourceLoc& getLoc() const { return loc; }
setLoc(const glslang::TSourceLoc & l)1140 virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
1141 virtual void traverse(glslang::TIntermTraverser*) = 0;
getAsTyped()1142 virtual glslang::TIntermTyped* getAsTyped() { return nullptr; }
getAsOperator()1143 virtual glslang::TIntermOperator* getAsOperator() { return nullptr; }
getAsConstantUnion()1144 virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return nullptr; }
getAsAggregate()1145 virtual glslang::TIntermAggregate* getAsAggregate() { return nullptr; }
getAsUnaryNode()1146 virtual glslang::TIntermUnary* getAsUnaryNode() { return nullptr; }
getAsBinaryNode()1147 virtual glslang::TIntermBinary* getAsBinaryNode() { return nullptr; }
getAsSelectionNode()1148 virtual glslang::TIntermSelection* getAsSelectionNode() { return nullptr; }
getAsSwitchNode()1149 virtual glslang::TIntermSwitch* getAsSwitchNode() { return nullptr; }
getAsMethodNode()1150 virtual glslang::TIntermMethod* getAsMethodNode() { return nullptr; }
getAsSymbolNode()1151 virtual glslang::TIntermSymbol* getAsSymbolNode() { return nullptr; }
getAsBranchNode()1152 virtual glslang::TIntermBranch* getAsBranchNode() { return nullptr; }
getAsLoopNode()1153 virtual glslang::TIntermLoop* getAsLoopNode() { return nullptr; }
1154
getAsTyped()1155 virtual const glslang::TIntermTyped* getAsTyped() const { return nullptr; }
getAsOperator()1156 virtual const glslang::TIntermOperator* getAsOperator() const { return nullptr; }
getAsConstantUnion()1157 virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return nullptr; }
getAsAggregate()1158 virtual const glslang::TIntermAggregate* getAsAggregate() const { return nullptr; }
getAsUnaryNode()1159 virtual const glslang::TIntermUnary* getAsUnaryNode() const { return nullptr; }
getAsBinaryNode()1160 virtual const glslang::TIntermBinary* getAsBinaryNode() const { return nullptr; }
getAsSelectionNode()1161 virtual const glslang::TIntermSelection* getAsSelectionNode() const { return nullptr; }
getAsSwitchNode()1162 virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return nullptr; }
getAsMethodNode()1163 virtual const glslang::TIntermMethod* getAsMethodNode() const { return nullptr; }
getAsSymbolNode()1164 virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return nullptr; }
getAsBranchNode()1165 virtual const glslang::TIntermBranch* getAsBranchNode() const { return nullptr; }
getAsLoopNode()1166 virtual const glslang::TIntermLoop* getAsLoopNode() const { return nullptr; }
~TIntermNode()1167 virtual ~TIntermNode() { }
1168
1169 protected:
1170 TIntermNode(const TIntermNode&);
1171 TIntermNode& operator=(const TIntermNode&);
1172 glslang::TSourceLoc loc;
1173 };
1174
1175 namespace glslang {
1176
1177 //
1178 // This is just to help yacc.
1179 //
1180 struct TIntermNodePair {
1181 TIntermNode* node1;
1182 TIntermNode* node2;
1183 };
1184
1185 //
1186 // Intermediate class for nodes that have a type.
1187 //
1188 class TIntermTyped : public TIntermNode {
1189 public:
TIntermTyped(const TType & t)1190 TIntermTyped(const TType& t) { type.shallowCopy(t); }
TIntermTyped(TBasicType basicType)1191 TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
getAsTyped()1192 virtual TIntermTyped* getAsTyped() { return this; }
getAsTyped()1193 virtual const TIntermTyped* getAsTyped() const { return this; }
setType(const TType & t)1194 virtual void setType(const TType& t) { type.shallowCopy(t); }
getType()1195 virtual const TType& getType() const { return type; }
getWritableType()1196 virtual TType& getWritableType() { return type; }
1197
getBasicType()1198 virtual TBasicType getBasicType() const { return type.getBasicType(); }
getQualifier()1199 virtual TQualifier& getQualifier() { return type.getQualifier(); }
getQualifier()1200 virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
getArraySizes()1201 virtual TArraySizes* getArraySizes() { return type.getArraySizes(); }
getArraySizes()1202 virtual const TArraySizes* getArraySizes() const { return type.getArraySizes(); }
1203 virtual void propagatePrecision(TPrecisionQualifier);
getVectorSize()1204 virtual int getVectorSize() const { return type.getVectorSize(); }
getMatrixCols()1205 virtual int getMatrixCols() const { return type.getMatrixCols(); }
getMatrixRows()1206 virtual int getMatrixRows() const { return type.getMatrixRows(); }
isMatrix()1207 virtual bool isMatrix() const { return type.isMatrix(); }
isArray()1208 virtual bool isArray() const { return type.isArray(); }
isVector()1209 virtual bool isVector() const { return type.isVector(); }
isScalar()1210 virtual bool isScalar() const { return type.isScalar(); }
isStruct()1211 virtual bool isStruct() const { return type.isStruct(); }
isFloatingDomain()1212 virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
isIntegerDomain()1213 virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
isAtomic()1214 bool isAtomic() const { return type.isAtomic(); }
isReference()1215 bool isReference() const { return type.isReference(); }
1216 TString getCompleteString(bool enhanced = false) const { return type.getCompleteString(enhanced); }
1217
1218 protected:
1219 TIntermTyped& operator=(const TIntermTyped&);
1220 TType type;
1221 };
1222
1223 //
1224 // Handle for, do-while, and while loops.
1225 //
1226 class TIntermLoop : public TIntermNode {
1227 public:
TIntermLoop(TIntermNode * aBody,TIntermTyped * aTest,TIntermTyped * aTerminal,bool testFirst)1228 TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
1229 body(aBody),
1230 test(aTest),
1231 terminal(aTerminal),
1232 first(testFirst),
1233 unroll(false),
1234 dontUnroll(false),
1235 dependency(0),
1236 minIterations(0),
1237 maxIterations(iterationsInfinite),
1238 iterationMultiple(1),
1239 peelCount(0),
1240 partialCount(0)
1241 { }
1242
getAsLoopNode()1243 virtual TIntermLoop* getAsLoopNode() { return this; }
getAsLoopNode()1244 virtual const TIntermLoop* getAsLoopNode() const { return this; }
1245 virtual void traverse(TIntermTraverser*);
getBody()1246 TIntermNode* getBody() const { return body; }
getTest()1247 TIntermTyped* getTest() const { return test; }
getTerminal()1248 TIntermTyped* getTerminal() const { return terminal; }
testFirst()1249 bool testFirst() const { return first; }
1250
setUnroll()1251 void setUnroll() { unroll = true; }
setDontUnroll()1252 void setDontUnroll() {
1253 dontUnroll = true;
1254 peelCount = 0;
1255 partialCount = 0;
1256 }
getUnroll()1257 bool getUnroll() const { return unroll; }
getDontUnroll()1258 bool getDontUnroll() const { return dontUnroll; }
1259
1260 static const unsigned int dependencyInfinite = 0xFFFFFFFF;
1261 static const unsigned int iterationsInfinite = 0xFFFFFFFF;
setLoopDependency(int d)1262 void setLoopDependency(int d) { dependency = d; }
getLoopDependency()1263 int getLoopDependency() const { return dependency; }
1264
setMinIterations(unsigned int v)1265 void setMinIterations(unsigned int v) { minIterations = v; }
getMinIterations()1266 unsigned int getMinIterations() const { return minIterations; }
setMaxIterations(unsigned int v)1267 void setMaxIterations(unsigned int v) { maxIterations = v; }
getMaxIterations()1268 unsigned int getMaxIterations() const { return maxIterations; }
setIterationMultiple(unsigned int v)1269 void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
getIterationMultiple()1270 unsigned int getIterationMultiple() const { return iterationMultiple; }
setPeelCount(unsigned int v)1271 void setPeelCount(unsigned int v) {
1272 peelCount = v;
1273 dontUnroll = false;
1274 }
getPeelCount()1275 unsigned int getPeelCount() const { return peelCount; }
setPartialCount(unsigned int v)1276 void setPartialCount(unsigned int v) {
1277 partialCount = v;
1278 dontUnroll = false;
1279 }
getPartialCount()1280 unsigned int getPartialCount() const { return partialCount; }
1281
1282 protected:
1283 TIntermNode* body; // code to loop over
1284 TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops
1285 TIntermTyped* terminal; // exists for for-loops
1286 bool first; // true for while and for, not for do-while
1287 bool unroll; // true if unroll requested
1288 bool dontUnroll; // true if request to not unroll
1289 unsigned int dependency; // loop dependency hint; 0 means not set or unknown
1290 unsigned int minIterations; // as per the SPIR-V specification
1291 unsigned int maxIterations; // as per the SPIR-V specification
1292 unsigned int iterationMultiple; // as per the SPIR-V specification
1293 unsigned int peelCount; // as per the SPIR-V specification
1294 unsigned int partialCount; // as per the SPIR-V specification
1295 };
1296
1297 //
1298 // Handle case, break, continue, return, and kill.
1299 //
1300 class TIntermBranch : public TIntermNode {
1301 public:
TIntermBranch(TOperator op,TIntermTyped * e)1302 TIntermBranch(TOperator op, TIntermTyped* e) :
1303 flowOp(op),
1304 expression(e) { }
getAsBranchNode()1305 virtual TIntermBranch* getAsBranchNode() { return this; }
getAsBranchNode()1306 virtual const TIntermBranch* getAsBranchNode() const { return this; }
1307 virtual void traverse(TIntermTraverser*);
getFlowOp()1308 TOperator getFlowOp() const { return flowOp; }
getExpression()1309 TIntermTyped* getExpression() const { return expression; }
setExpression(TIntermTyped * pExpression)1310 void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
1311 void updatePrecision(TPrecisionQualifier parentPrecision);
1312 protected:
1313 TOperator flowOp;
1314 TIntermTyped* expression;
1315 };
1316
1317 //
1318 // Represent method names before seeing their calling signature
1319 // or resolving them to operations. Just an expression as the base object
1320 // and a textural name.
1321 //
1322 class TIntermMethod : public TIntermTyped {
1323 public:
TIntermMethod(TIntermTyped * o,const TType & t,const TString & m)1324 TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
getAsMethodNode()1325 virtual TIntermMethod* getAsMethodNode() { return this; }
getAsMethodNode()1326 virtual const TIntermMethod* getAsMethodNode() const { return this; }
getMethodName()1327 virtual const TString& getMethodName() const { return method; }
getObject()1328 virtual TIntermTyped* getObject() const { return object; }
1329 virtual void traverse(TIntermTraverser*);
setExport()1330 void setExport() { linkType = ELinkExport; }
1331 protected:
1332 TIntermTyped* object;
1333 TString method;
1334 TLinkType linkType;
1335 };
1336
1337 //
1338 // Nodes that correspond to symbols or constants in the source code.
1339 //
1340 class TIntermSymbol : public TIntermTyped {
1341 public:
1342 // if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes from
1343 // per process threadPoolAllocator, then it causes increased memory usage per compile
1344 // it is essential to use "symbol = sym" to assign to symbol
TIntermSymbol(long long i,const TString & n,const TType & t)1345 TIntermSymbol(long long i, const TString& n, const TType& t)
1346 : TIntermTyped(t), id(i), flattenSubset(-1), constSubtree(nullptr) { name = n; }
getId()1347 virtual long long getId() const { return id; }
changeId(long long i)1348 virtual void changeId(long long i) { id = i; }
getName()1349 virtual const TString& getName() const { return name; }
1350 virtual void traverse(TIntermTraverser*);
getAsSymbolNode()1351 virtual TIntermSymbol* getAsSymbolNode() { return this; }
getAsSymbolNode()1352 virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
setConstArray(const TConstUnionArray & c)1353 void setConstArray(const TConstUnionArray& c) { constArray = c; }
getConstArray()1354 const TConstUnionArray& getConstArray() const { return constArray; }
setConstSubtree(TIntermTyped * subtree)1355 void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
getConstSubtree()1356 TIntermTyped* getConstSubtree() const { return constSubtree; }
setFlattenSubset(int subset)1357 void setFlattenSubset(int subset) { flattenSubset = subset; }
1358 virtual const TString& getAccessName() const;
1359
getFlattenSubset()1360 int getFlattenSubset() const { return flattenSubset; } // -1 means full object
1361
1362 // This is meant for cases where a node has already been constructed, and
1363 // later on, it becomes necessary to switch to a different symbol.
switchId(long long newId)1364 virtual void switchId(long long newId) { id = newId; }
1365
1366 protected:
1367 long long id; // the unique id of the symbol this node represents
1368 int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
1369 TString name; // the name of the symbol this node represents
1370 TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
1371 TIntermTyped* constSubtree;
1372 };
1373
1374 class TIntermConstantUnion : public TIntermTyped {
1375 public:
TIntermConstantUnion(const TConstUnionArray & ua,const TType & t)1376 TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
getConstArray()1377 const TConstUnionArray& getConstArray() const { return constArray; }
getAsConstantUnion()1378 virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
getAsConstantUnion()1379 virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
1380 virtual void traverse(TIntermTraverser*);
1381 virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
1382 virtual TIntermTyped* fold(TOperator, const TType&) const;
setLiteral()1383 void setLiteral() { literal = true; }
setExpression()1384 void setExpression() { literal = false; }
isLiteral()1385 bool isLiteral() const { return literal; }
1386
1387 protected:
1388 TIntermConstantUnion& operator=(const TIntermConstantUnion&);
1389
1390 const TConstUnionArray constArray;
1391 bool literal; // true if node represents a literal in the source code
1392 };
1393
1394 // Represent the independent aspects of a texturing TOperator
1395 struct TCrackedTextureOp {
1396 bool query;
1397 bool proj;
1398 bool lod;
1399 bool fetch;
1400 bool offset;
1401 bool offsets;
1402 bool gather;
1403 bool grad;
1404 bool subpass;
1405 bool lodClamp;
1406 bool fragMask;
1407 bool attachmentEXT;
1408 };
1409
1410 //
1411 // Intermediate class for node types that hold operators.
1412 //
1413 class TIntermOperator : public TIntermTyped {
1414 public:
getAsOperator()1415 virtual TIntermOperator* getAsOperator() { return this; }
getAsOperator()1416 virtual const TIntermOperator* getAsOperator() const { return this; }
getOp()1417 TOperator getOp() const { return op; }
setOp(TOperator newOp)1418 void setOp(TOperator newOp) { op = newOp; }
1419 bool modifiesState() const;
1420 bool isConstructor() const;
isTexture()1421 bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
isSampling()1422 bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
isImage()1423 bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
isSparseTexture()1424 bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
isImageFootprint()1425 bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
isSparseImage()1426 bool isSparseImage() const { return op == EOpSparseImageLoad; }
isSubgroup()1427 bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; }
1428
setOperationPrecision(TPrecisionQualifier p)1429 void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
getOperationPrecision()1430 TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
1431 operationPrecision :
1432 type.getQualifier().precision; }
getCompleteString()1433 TString getCompleteString() const
1434 {
1435 TString cs = type.getCompleteString();
1436 if (getOperationPrecision() != type.getQualifier().precision) {
1437 cs += ", operation at ";
1438 cs += GetPrecisionQualifierString(getOperationPrecision());
1439 }
1440
1441 return cs;
1442 }
1443
1444 // Crack the op into the individual dimensions of texturing operation.
crackTexture(TSampler sampler,TCrackedTextureOp & cracked)1445 void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
1446 {
1447 cracked.query = false;
1448 cracked.proj = false;
1449 cracked.lod = false;
1450 cracked.fetch = false;
1451 cracked.offset = false;
1452 cracked.offsets = false;
1453 cracked.gather = false;
1454 cracked.grad = false;
1455 cracked.subpass = false;
1456 cracked.attachmentEXT = false;
1457 cracked.lodClamp = false;
1458 cracked.fragMask = false;
1459
1460 switch (op) {
1461 case EOpImageQuerySize:
1462 case EOpImageQuerySamples:
1463 case EOpTextureQuerySize:
1464 case EOpTextureQueryLod:
1465 case EOpTextureQueryLevels:
1466 case EOpTextureQuerySamples:
1467 case EOpSparseTexelsResident:
1468 cracked.query = true;
1469 break;
1470 case EOpTexture:
1471 case EOpSparseTexture:
1472 break;
1473 case EOpTextureProj:
1474 cracked.proj = true;
1475 break;
1476 case EOpTextureLod:
1477 case EOpSparseTextureLod:
1478 cracked.lod = true;
1479 break;
1480 case EOpTextureOffset:
1481 case EOpSparseTextureOffset:
1482 cracked.offset = true;
1483 break;
1484 case EOpTextureFetch:
1485 case EOpSparseTextureFetch:
1486 cracked.fetch = true;
1487 if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
1488 cracked.lod = true;
1489 break;
1490 case EOpTextureFetchOffset:
1491 case EOpSparseTextureFetchOffset:
1492 cracked.fetch = true;
1493 cracked.offset = true;
1494 if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
1495 cracked.lod = true;
1496 break;
1497 case EOpTextureProjOffset:
1498 cracked.offset = true;
1499 cracked.proj = true;
1500 break;
1501 case EOpTextureLodOffset:
1502 case EOpSparseTextureLodOffset:
1503 cracked.offset = true;
1504 cracked.lod = true;
1505 break;
1506 case EOpTextureProjLod:
1507 cracked.lod = true;
1508 cracked.proj = true;
1509 break;
1510 case EOpTextureProjLodOffset:
1511 cracked.offset = true;
1512 cracked.lod = true;
1513 cracked.proj = true;
1514 break;
1515 case EOpTextureGrad:
1516 case EOpSparseTextureGrad:
1517 cracked.grad = true;
1518 break;
1519 case EOpTextureGradOffset:
1520 case EOpSparseTextureGradOffset:
1521 cracked.grad = true;
1522 cracked.offset = true;
1523 break;
1524 case EOpTextureProjGrad:
1525 cracked.grad = true;
1526 cracked.proj = true;
1527 break;
1528 case EOpTextureProjGradOffset:
1529 cracked.grad = true;
1530 cracked.offset = true;
1531 cracked.proj = true;
1532 break;
1533 case EOpTextureClamp:
1534 case EOpSparseTextureClamp:
1535 cracked.lodClamp = true;
1536 break;
1537 case EOpTextureOffsetClamp:
1538 case EOpSparseTextureOffsetClamp:
1539 cracked.offset = true;
1540 cracked.lodClamp = true;
1541 break;
1542 case EOpTextureGradClamp:
1543 case EOpSparseTextureGradClamp:
1544 cracked.grad = true;
1545 cracked.lodClamp = true;
1546 break;
1547 case EOpTextureGradOffsetClamp:
1548 case EOpSparseTextureGradOffsetClamp:
1549 cracked.grad = true;
1550 cracked.offset = true;
1551 cracked.lodClamp = true;
1552 break;
1553 case EOpTextureGather:
1554 case EOpSparseTextureGather:
1555 cracked.gather = true;
1556 break;
1557 case EOpTextureGatherOffset:
1558 case EOpSparseTextureGatherOffset:
1559 cracked.gather = true;
1560 cracked.offset = true;
1561 break;
1562 case EOpTextureGatherOffsets:
1563 case EOpSparseTextureGatherOffsets:
1564 cracked.gather = true;
1565 cracked.offsets = true;
1566 break;
1567 case EOpTextureGatherLod:
1568 case EOpSparseTextureGatherLod:
1569 cracked.gather = true;
1570 cracked.lod = true;
1571 break;
1572 case EOpTextureGatherLodOffset:
1573 case EOpSparseTextureGatherLodOffset:
1574 cracked.gather = true;
1575 cracked.offset = true;
1576 cracked.lod = true;
1577 break;
1578 case EOpTextureGatherLodOffsets:
1579 case EOpSparseTextureGatherLodOffsets:
1580 cracked.gather = true;
1581 cracked.offsets = true;
1582 cracked.lod = true;
1583 break;
1584 case EOpImageLoadLod:
1585 case EOpImageStoreLod:
1586 case EOpSparseImageLoadLod:
1587 cracked.lod = true;
1588 break;
1589 case EOpFragmentMaskFetch:
1590 cracked.subpass = sampler.dim == EsdSubpass;
1591 cracked.fragMask = true;
1592 break;
1593 case EOpFragmentFetch:
1594 cracked.subpass = sampler.dim == EsdSubpass;
1595 cracked.fragMask = true;
1596 break;
1597 case EOpImageSampleFootprintNV:
1598 break;
1599 case EOpImageSampleFootprintClampNV:
1600 cracked.lodClamp = true;
1601 break;
1602 case EOpImageSampleFootprintLodNV:
1603 cracked.lod = true;
1604 break;
1605 case EOpImageSampleFootprintGradNV:
1606 cracked.grad = true;
1607 break;
1608 case EOpImageSampleFootprintGradClampNV:
1609 cracked.lodClamp = true;
1610 cracked.grad = true;
1611 break;
1612 case EOpSubpassLoad:
1613 case EOpSubpassLoadMS:
1614 cracked.subpass = true;
1615 break;
1616 case EOpColorAttachmentReadEXT:
1617 cracked.attachmentEXT = true;
1618 break;
1619 default:
1620 break;
1621 }
1622 }
1623
1624 protected:
TIntermOperator(TOperator o)1625 TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
TIntermOperator(TOperator o,TType & t)1626 TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
1627 TOperator op;
1628 // The result precision is in the inherited TType, and is usually meant to be both
1629 // the operation precision and the result precision. However, some more complex things,
1630 // like built-in function calls, distinguish between the two, in which case non-EqpNone
1631 // 'operationPrecision' overrides the result precision as far as operation precision
1632 // is concerned.
1633 TPrecisionQualifier operationPrecision;
1634 };
1635
1636 //
1637 // Nodes for all the basic binary math operators.
1638 //
1639 class TIntermBinary : public TIntermOperator {
1640 public:
TIntermBinary(TOperator o)1641 TIntermBinary(TOperator o) : TIntermOperator(o) {}
1642 virtual void traverse(TIntermTraverser*);
setLeft(TIntermTyped * n)1643 virtual void setLeft(TIntermTyped* n) { left = n; }
setRight(TIntermTyped * n)1644 virtual void setRight(TIntermTyped* n) { right = n; }
getLeft()1645 virtual TIntermTyped* getLeft() const { return left; }
getRight()1646 virtual TIntermTyped* getRight() const { return right; }
getAsBinaryNode()1647 virtual TIntermBinary* getAsBinaryNode() { return this; }
getAsBinaryNode()1648 virtual const TIntermBinary* getAsBinaryNode() const { return this; }
1649 virtual void updatePrecision();
1650 protected:
1651 TIntermTyped* left;
1652 TIntermTyped* right;
1653 };
1654
1655 //
1656 // Nodes for unary math operators.
1657 //
1658 class TIntermUnary : public TIntermOperator {
1659 public:
TIntermUnary(TOperator o,TType & t)1660 TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(nullptr) {}
TIntermUnary(TOperator o)1661 TIntermUnary(TOperator o) : TIntermOperator(o), operand(nullptr) {}
1662 virtual void traverse(TIntermTraverser*);
setOperand(TIntermTyped * o)1663 virtual void setOperand(TIntermTyped* o) { operand = o; }
getOperand()1664 virtual TIntermTyped* getOperand() { return operand; }
getOperand()1665 virtual const TIntermTyped* getOperand() const { return operand; }
getAsUnaryNode()1666 virtual TIntermUnary* getAsUnaryNode() { return this; }
getAsUnaryNode()1667 virtual const TIntermUnary* getAsUnaryNode() const { return this; }
1668 virtual void updatePrecision();
setSpirvInstruction(const TSpirvInstruction & inst)1669 void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
getSpirvInstruction()1670 const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
1671 protected:
1672 TIntermTyped* operand;
1673 TSpirvInstruction spirvInst;
1674 };
1675
1676 typedef TVector<TIntermNode*> TIntermSequence;
1677 typedef TVector<TStorageQualifier> TQualifierList;
1678 //
1679 // Nodes that operate on an arbitrary sized set of children.
1680 //
class TIntermAggregate : public TIntermOperator {
public:
    TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
    TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
    ~TIntermAggregate() { delete pragmaTable; }
    virtual       TIntermAggregate* getAsAggregate()       { return this; }
    virtual const TIntermAggregate* getAsAggregate() const { return this; }
    virtual void updatePrecision();
    virtual void setOperator(TOperator o) { op = o; }
    virtual       TIntermSequence& getSequence()       { return sequence; }
    virtual const TIntermSequence& getSequence() const { return sequence; }
    virtual void setName(const TString& n) { name = n; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual void setUserDefined() { userDefined = true; }
    virtual bool isUserDefined() { return userDefined; }
    virtual       TQualifierList& getQualifierList()       { return qualifier; }
    virtual const TQualifierList& getQualifierList() const { return qualifier; }
    void setOptimize(bool o) { optimize = o; }
    void setDebug(bool d) { debug = d; }
    bool getOptimize() const { return optimize; }
    bool getDebug() const { return debug; }
    void setPragmaTable(const TPragmaTable& pTable);
    const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
    void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
    const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }

    void setLinkType(TLinkType l) { linkType = l; }
    TLinkType getLinkType() const { return linkType; }
protected:
    TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
    TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
    TIntermSequence sequence;
    TQualifierList qualifier;
    TString name;
    bool userDefined; // used for user-defined function names
    bool optimize;
    bool debug;
    TPragmaTable* pragmaTable;
    TSpirvInstruction spirvInst;
    TLinkType linkType = ELinkNone;
};

//
// For if tests.
//
class TIntermSelection : public TIntermTyped {
public:
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
        TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
        TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* getCondition() const { return condition; }
    virtual void setCondition(TIntermTyped* c) { condition = c; }
    virtual TIntermNode* getTrueBlock() const { return trueBlock; }
    virtual void setTrueBlock(TIntermTyped* tb) { trueBlock = tb; }
    virtual TIntermNode* getFalseBlock() const { return falseBlock; }
    virtual void setFalseBlock(TIntermTyped* fb) { falseBlock = fb; }
    virtual       TIntermSelection* getAsSelectionNode()       { return this; }
    virtual const TIntermSelection* getAsSelectionNode() const { return this; }

    void setNoShortCircuit() { shortCircuit = false; }
    bool getShortCircuit() const { return shortCircuit; }

    void setFlatten()     { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten() const     { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermNode* trueBlock;
    TIntermNode* falseBlock;
    bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
    bool flatten;      // true if flatten requested
    bool dontFlatten;  // true if requested to not flatten
};

//
// For switch statements. Designed use is that a switch will have a sequence of nodes
// that are either case/default nodes or a *single* node that represents all the code
// in between (if any) consecutive case/default statements. So, a traversal need only
// deal with 0 or 1 nodes per case/default statement.
//
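// For example (an illustrative sketch of that layout, not a guarantee made by this
// header), the GLSL fragment
//
//     switch (x) {
//     case 0: f(); g(); break;
//     case 1:
//     default: h();
//     }
//
// would produce a body sequence along the lines of
//
//     [ case 0, { f(); g(); break; }, case 1, default, { h(); } ]
//
// where each brace group stands for the single node holding the code between
// consecutive case/default labels.
//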
class TIntermSwitch : public TIntermNode {
public:
    TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermNode* getCondition() const { return condition; }
    virtual TIntermAggregate* getBody() const { return body; }
    virtual       TIntermSwitch* getAsSwitchNode()       { return this; }
    virtual const TIntermSwitch* getAsSwitchNode() const { return this; }

    void setFlatten()     { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten() const     { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermAggregate* body;
    bool flatten;     // true if flatten requested
    bool dontFlatten; // true if requested to not flatten
};

enum TVisit
{
    EvPreVisit,
    EvInVisit,
    EvPostVisit
};

//
// For traversing the tree. Users should derive from this class,
// put their traversal-specific data in it, and then pass
// it to a Traverse method.
//
// When using this, just fill in the methods for the nodes you want visited.
// Return false from a pre-visit to skip visiting that node's subtree.
//
// Explicitly set postVisit to true if you want post-visiting; otherwise,
// filled-in methods will only be called at pre-visit time (before processing
// the subtree). Similarly, set inVisit for in-order visiting of nodes with
// multiple children.
//
// If you only want post-visits, explicitly turn off preVisit (and inVisit)
// and turn on postVisit.
//
// In general, for the visit*() methods, return true from interior nodes
// to have the traversal continue on to children.
//
// If you process children yourself, or don't want them processed, return false.
//
class TIntermTraverser {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
            preVisit(preVisit),
            inVisit(inVisit),
            postVisit(postVisit),
            rightToLeft(rightToLeft),
            depth(0),
            maxDepth(0) { }
    virtual ~TIntermTraverser() { }

    virtual void visitSymbol(TIntermSymbol*)               { }
    virtual void visitConstantUnion(TIntermConstantUnion*) { }
    virtual bool visitBinary(TVisit, TIntermBinary*)       { return true; }
    virtual bool visitUnary(TVisit, TIntermUnary*)         { return true; }
    virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
    virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
    virtual bool visitLoop(TVisit, TIntermLoop*)           { return true; }
    virtual bool visitBranch(TVisit, TIntermBranch*)       { return true; }
    virtual bool visitSwitch(TVisit, TIntermSwitch*)       { return true; }

    int getMaxDepth() const { return maxDepth; }

    void incrementDepth(TIntermNode *current)
    {
        depth++;
        maxDepth = (std::max)(maxDepth, depth);
        path.push_back(current);
    }

    void decrementDepth()
    {
        depth--;
        path.pop_back();
    }

    TIntermNode *getParentNode()
    {
        return path.size() == 0 ? nullptr : path.back();
    }

    const bool preVisit;
    const bool inVisit;
    const bool postVisit;
    const bool rightToLeft;

protected:
    TIntermTraverser& operator=(TIntermTraverser&);

    int depth;
    int maxDepth;

    // All the nodes from the root to the current node's parent during traversal.
    TVector<TIntermNode *> path;
};
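
// A minimal usage sketch (illustrative only; TSymbolCounter is a hypothetical
// client class, not part of this header):
//
//     class TSymbolCounter : public TIntermTraverser {
//     public:
//         TSymbolCounter() : TIntermTraverser(), count(0) { }   // default: pre-visit only
//         void visitSymbol(TIntermSymbol*) override { ++count; } // called once per symbol leaf
//         int count;
//     };
//
//     TSymbolCounter counter;
//     root->traverse(&counter);   // 'root' is any TIntermNode*; count then holds the symbol total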

// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
// sized with the same symbol, involving no operations"
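//
// For example, under that rule (illustrative GLSL, not part of this header):
//
//     layout(constant_id = 1) const int N = 3;
//     float a[N];     // same type as b[]: both sized with the symbol N
//     float b[N];
//     float c[N + 0]; // not the same type: the size involves an operation
//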
inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
{
    return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
        node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
}

} // end namespace glslang

#endif // __INTERMEDIATE_H