//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2016 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

//
// Definition of the in-memory high-level intermediate representation
// of shaders. This is a tree that the parser creates.
//
// Nodes in the tree are defined as a hierarchy of classes derived from
// TIntermNode. Each is a node in a tree. There is no preset branching factor;
// each node can have its own type of list of children.
//

#ifndef __INTERMEDIATE_H
#define __INTERMEDIATE_H

#if defined(_MSC_VER) && _MSC_VER >= 1900
    #pragma warning(disable : 4464) // relative include path contains '..'
    #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
#endif

#include "../Include/Common.h"
#include "../Include/Types.h"
#include "../Include/ConstantUnion.h"

namespace glslang {

class TIntermediate;

//
// Operators used by the high-level (parse tree) representation.
//
enum TOperator {
    EOpNull,            // if in a node, should only mean a node is still being built
    EOpSequence,        // denotes a list of statements, or parameters, etc.
    EOpLinkerObjects,   // for an aggregate node of objects the linker may need, if not referenced by the rest of the AST
    EOpFunctionCall,
    EOpFunction,        // For function definition
    EOpParameters,      // an aggregate listing the parameters to a function

    //
    // Unary operators
    //

    EOpNegative,
    EOpLogicalNot,
    EOpVectorLogicalNot,
    EOpBitwiseNot,

    EOpPostIncrement,
    EOpPostDecrement,
    EOpPreIncrement,
    EOpPreDecrement,

    // (u)int* -> bool
    EOpConvInt8ToBool,
    EOpConvUint8ToBool,
    EOpConvInt16ToBool,
    EOpConvUint16ToBool,
    EOpConvIntToBool,
    EOpConvUintToBool,
    EOpConvInt64ToBool,
    EOpConvUint64ToBool,

    // float* -> bool
    EOpConvFloat16ToBool,
    EOpConvFloatToBool,
    EOpConvDoubleToBool,

    // bool -> (u)int*
    EOpConvBoolToInt8,
    EOpConvBoolToUint8,
    EOpConvBoolToInt16,
    EOpConvBoolToUint16,
    EOpConvBoolToInt,
    EOpConvBoolToUint,
    EOpConvBoolToInt64,
    EOpConvBoolToUint64,

    // bool -> float*
    EOpConvBoolToFloat16,
    EOpConvBoolToFloat,
    EOpConvBoolToDouble,

    // int8_t -> (u)int*
    EOpConvInt8ToInt16,
    EOpConvInt8ToInt,
    EOpConvInt8ToInt64,
    EOpConvInt8ToUint8,
    EOpConvInt8ToUint16,
    EOpConvInt8ToUint,
    EOpConvInt8ToUint64,

    // uint8_t -> (u)int*
    EOpConvUint8ToInt8,
    EOpConvUint8ToInt16,
    EOpConvUint8ToInt,
    EOpConvUint8ToInt64,
    EOpConvUint8ToUint16,
    EOpConvUint8ToUint,
    EOpConvUint8ToUint64,

    // int8_t -> float*
    EOpConvInt8ToFloat16,
    EOpConvInt8ToFloat,
    EOpConvInt8ToDouble,

    // uint8_t -> float*
    EOpConvUint8ToFloat16,
    EOpConvUint8ToFloat,
    EOpConvUint8ToDouble,

    // int16_t -> (u)int*
    EOpConvInt16ToInt8,
    EOpConvInt16ToInt,
    EOpConvInt16ToInt64,
    EOpConvInt16ToUint8,
    EOpConvInt16ToUint16,
    EOpConvInt16ToUint,
    EOpConvInt16ToUint64,

    // uint16_t -> (u)int*
    EOpConvUint16ToInt8,
    EOpConvUint16ToInt16,
    EOpConvUint16ToInt,
    EOpConvUint16ToInt64,
    EOpConvUint16ToUint8,
    EOpConvUint16ToUint,
    EOpConvUint16ToUint64,

    // int16_t -> float*
    EOpConvInt16ToFloat16,
    EOpConvInt16ToFloat,
    EOpConvInt16ToDouble,

    // uint16_t -> float*
    EOpConvUint16ToFloat16,
    EOpConvUint16ToFloat,
    EOpConvUint16ToDouble,

    // int32_t -> (u)int*
    EOpConvIntToInt8,
    EOpConvIntToInt16,
    EOpConvIntToInt64,
    EOpConvIntToUint8,
    EOpConvIntToUint16,
    EOpConvIntToUint,
    EOpConvIntToUint64,

    // uint32_t -> (u)int*
    EOpConvUintToInt8,
    EOpConvUintToInt16,
    EOpConvUintToInt,
    EOpConvUintToInt64,
    EOpConvUintToUint8,
    EOpConvUintToUint16,
    EOpConvUintToUint64,

    // int32_t -> float*
    EOpConvIntToFloat16,
    EOpConvIntToFloat,
    EOpConvIntToDouble,

    // uint32_t -> float*
    EOpConvUintToFloat16,
    EOpConvUintToFloat,
    EOpConvUintToDouble,

    // int64_t -> (u)int*
    EOpConvInt64ToInt8,
    EOpConvInt64ToInt16,
    EOpConvInt64ToInt,
    EOpConvInt64ToUint8,
    EOpConvInt64ToUint16,
    EOpConvInt64ToUint,
    EOpConvInt64ToUint64,

    // uint64_t -> (u)int*
    EOpConvUint64ToInt8,
    EOpConvUint64ToInt16,
    EOpConvUint64ToInt,
    EOpConvUint64ToInt64,
    EOpConvUint64ToUint8,
    EOpConvUint64ToUint16,
    EOpConvUint64ToUint,

    // int64_t -> float*
    EOpConvInt64ToFloat16,
    EOpConvInt64ToFloat,
    EOpConvInt64ToDouble,

    // uint64_t -> float*
    EOpConvUint64ToFloat16,
    EOpConvUint64ToFloat,
    EOpConvUint64ToDouble,

    // float16_t -> (u)int*
    EOpConvFloat16ToInt8,
    EOpConvFloat16ToInt16,
    EOpConvFloat16ToInt,
    EOpConvFloat16ToInt64,
    EOpConvFloat16ToUint8,
    EOpConvFloat16ToUint16,
    EOpConvFloat16ToUint,
    EOpConvFloat16ToUint64,

    // float16_t -> float*
    EOpConvFloat16ToFloat,
    EOpConvFloat16ToDouble,

    // float -> (u)int*
    EOpConvFloatToInt8,
    EOpConvFloatToInt16,
    EOpConvFloatToInt,
    EOpConvFloatToInt64,
    EOpConvFloatToUint8,
    EOpConvFloatToUint16,
    EOpConvFloatToUint,
    EOpConvFloatToUint64,

    // float -> float*
    EOpConvFloatToFloat16,
    EOpConvFloatToDouble,

    // float64_t -> (u)int*
    EOpConvDoubleToInt8,
    EOpConvDoubleToInt16,
    EOpConvDoubleToInt,
    EOpConvDoubleToInt64,
    EOpConvDoubleToUint8,
    EOpConvDoubleToUint16,
    EOpConvDoubleToUint,
    EOpConvDoubleToUint64,

    // float64_t -> float*
    EOpConvDoubleToFloat16,
    EOpConvDoubleToFloat,

    // uint64_t <-> pointer
    EOpConvUint64ToPtr,
    EOpConvPtrToUint64,

    //
    // binary operations
    //

    EOpAdd,
    EOpSub,
    EOpMul,
    EOpDiv,
    EOpMod,
    EOpRightShift,
    EOpLeftShift,
    EOpAnd,
    EOpInclusiveOr,
    EOpExclusiveOr,
    EOpEqual,
    EOpNotEqual,
    EOpVectorEqual,
    EOpVectorNotEqual,
    EOpLessThan,
    EOpGreaterThan,
    EOpLessThanEqual,
    EOpGreaterThanEqual,
    EOpComma,

    EOpVectorTimesScalar,
    EOpVectorTimesMatrix,
    EOpMatrixTimesVector,
    EOpMatrixTimesScalar,

    EOpLogicalOr,
    EOpLogicalXor,
    EOpLogicalAnd,

    EOpIndexDirect,
    EOpIndexIndirect,
    EOpIndexDirectStruct,

    EOpVectorSwizzle,

    EOpMethod,
    EOpScoping,

    //
    // Built-in functions mapped to operators
    //

    EOpRadians,
    EOpDegrees,
    EOpSin,
    EOpCos,
    EOpTan,
    EOpAsin,
    EOpAcos,
    EOpAtan,
    EOpSinh,
    EOpCosh,
    EOpTanh,
    EOpAsinh,
    EOpAcosh,
    EOpAtanh,

    EOpPow,
    EOpExp,
    EOpLog,
    EOpExp2,
    EOpLog2,
    EOpSqrt,
    EOpInverseSqrt,

    EOpAbs,
    EOpSign,
    EOpFloor,
    EOpTrunc,
    EOpRound,
    EOpRoundEven,
    EOpCeil,
    EOpFract,
    EOpModf,
    EOpMin,
    EOpMax,
    EOpClamp,
    EOpMix,
    EOpStep,
    EOpSmoothStep,

    EOpIsNan,
    EOpIsInf,

    EOpFma,

    EOpFrexp,
    EOpLdexp,

    EOpFloatBitsToInt,
    EOpFloatBitsToUint,
    EOpIntBitsToFloat,
    EOpUintBitsToFloat,
    EOpDoubleBitsToInt64,
    EOpDoubleBitsToUint64,
    EOpInt64BitsToDouble,
    EOpUint64BitsToDouble,
    EOpFloat16BitsToInt16,
    EOpFloat16BitsToUint16,
    EOpInt16BitsToFloat16,
    EOpUint16BitsToFloat16,
    EOpPackSnorm2x16,
    EOpUnpackSnorm2x16,
    EOpPackUnorm2x16,
    EOpUnpackUnorm2x16,
    EOpPackSnorm4x8,
    EOpUnpackSnorm4x8,
    EOpPackUnorm4x8,
    EOpUnpackUnorm4x8,
    EOpPackHalf2x16,
    EOpUnpackHalf2x16,
    EOpPackDouble2x32,
    EOpUnpackDouble2x32,
    EOpPackInt2x32,
    EOpUnpackInt2x32,
    EOpPackUint2x32,
    EOpUnpackUint2x32,
    EOpPackFloat2x16,
    EOpUnpackFloat2x16,
    EOpPackInt2x16,
    EOpUnpackInt2x16,
    EOpPackUint2x16,
    EOpUnpackUint2x16,
    EOpPackInt4x16,
    EOpUnpackInt4x16,
    EOpPackUint4x16,
    EOpUnpackUint4x16,
    EOpPack16,
    EOpPack32,
    EOpPack64,
    EOpUnpack32,
    EOpUnpack16,
    EOpUnpack8,

    EOpLength,
    EOpDistance,
    EOpDot,
    EOpCross,
    EOpNormalize,
    EOpFaceForward,
    EOpReflect,
    EOpRefract,

#ifdef AMD_EXTENSIONS
    EOpMin3,
    EOpMax3,
    EOpMid3,
#endif

    EOpDPdx,            // Fragment only
    EOpDPdy,            // Fragment only
    EOpFwidth,          // Fragment only
    EOpDPdxFine,        // Fragment only
    EOpDPdyFine,        // Fragment only
    EOpFwidthFine,      // Fragment only
    EOpDPdxCoarse,      // Fragment only
    EOpDPdyCoarse,      // Fragment only
    EOpFwidthCoarse,    // Fragment only

    EOpInterpolateAtCentroid, // Fragment only
    EOpInterpolateAtSample,   // Fragment only
    EOpInterpolateAtOffset,   // Fragment only

#ifdef AMD_EXTENSIONS
    EOpInterpolateAtVertex,
#endif

    EOpMatrixTimesMatrix,
    EOpOuterProduct,
    EOpDeterminant,
    EOpMatrixInverse,
    EOpTranspose,

    EOpFtransform,

    EOpNoise,

    EOpEmitVertex,          // geometry only
    EOpEndPrimitive,        // geometry only
    EOpEmitStreamVertex,    // geometry only
    EOpEndStreamPrimitive,  // geometry only

    EOpBarrier,
    EOpMemoryBarrier,
    EOpMemoryBarrierAtomicCounter,
    EOpMemoryBarrierBuffer,
    EOpMemoryBarrierImage,
    EOpMemoryBarrierShared, // compute only
    EOpGroupMemoryBarrier,  // compute only

    EOpBallot,
    EOpReadInvocation,
    EOpReadFirstInvocation,

    EOpAnyInvocation,
    EOpAllInvocations,
    EOpAllInvocationsEqual,

    EOpSubgroupGuardStart,
    EOpSubgroupBarrier,
    EOpSubgroupMemoryBarrier,
    EOpSubgroupMemoryBarrierBuffer,
    EOpSubgroupMemoryBarrierImage,
    EOpSubgroupMemoryBarrierShared, // compute only
    EOpSubgroupElect,
    EOpSubgroupAll,
    EOpSubgroupAny,
    EOpSubgroupAllEqual,
    EOpSubgroupBroadcast,
    EOpSubgroupBroadcastFirst,
    EOpSubgroupBallot,
    EOpSubgroupInverseBallot,
    EOpSubgroupBallotBitExtract,
    EOpSubgroupBallotBitCount,
    EOpSubgroupBallotInclusiveBitCount,
    EOpSubgroupBallotExclusiveBitCount,
    EOpSubgroupBallotFindLSB,
    EOpSubgroupBallotFindMSB,
    EOpSubgroupShuffle,
    EOpSubgroupShuffleXor,
    EOpSubgroupShuffleUp,
    EOpSubgroupShuffleDown,
    EOpSubgroupAdd,
    EOpSubgroupMul,
    EOpSubgroupMin,
    EOpSubgroupMax,
    EOpSubgroupAnd,
    EOpSubgroupOr,
    EOpSubgroupXor,
    EOpSubgroupInclusiveAdd,
    EOpSubgroupInclusiveMul,
    EOpSubgroupInclusiveMin,
    EOpSubgroupInclusiveMax,
    EOpSubgroupInclusiveAnd,
    EOpSubgroupInclusiveOr,
    EOpSubgroupInclusiveXor,
    EOpSubgroupExclusiveAdd,
    EOpSubgroupExclusiveMul,
    EOpSubgroupExclusiveMin,
    EOpSubgroupExclusiveMax,
    EOpSubgroupExclusiveAnd,
    EOpSubgroupExclusiveOr,
    EOpSubgroupExclusiveXor,
    EOpSubgroupClusteredAdd,
    EOpSubgroupClusteredMul,
    EOpSubgroupClusteredMin,
    EOpSubgroupClusteredMax,
    EOpSubgroupClusteredAnd,
    EOpSubgroupClusteredOr,
    EOpSubgroupClusteredXor,
    EOpSubgroupQuadBroadcast,
    EOpSubgroupQuadSwapHorizontal,
    EOpSubgroupQuadSwapVertical,
    EOpSubgroupQuadSwapDiagonal,

#ifdef NV_EXTENSIONS
    EOpSubgroupPartition,
    EOpSubgroupPartitionedAdd,
    EOpSubgroupPartitionedMul,
    EOpSubgroupPartitionedMin,
    EOpSubgroupPartitionedMax,
    EOpSubgroupPartitionedAnd,
    EOpSubgroupPartitionedOr,
    EOpSubgroupPartitionedXor,
    EOpSubgroupPartitionedInclusiveAdd,
    EOpSubgroupPartitionedInclusiveMul,
    EOpSubgroupPartitionedInclusiveMin,
    EOpSubgroupPartitionedInclusiveMax,
    EOpSubgroupPartitionedInclusiveAnd,
    EOpSubgroupPartitionedInclusiveOr,
    EOpSubgroupPartitionedInclusiveXor,
    EOpSubgroupPartitionedExclusiveAdd,
    EOpSubgroupPartitionedExclusiveMul,
    EOpSubgroupPartitionedExclusiveMin,
    EOpSubgroupPartitionedExclusiveMax,
    EOpSubgroupPartitionedExclusiveAnd,
    EOpSubgroupPartitionedExclusiveOr,
    EOpSubgroupPartitionedExclusiveXor,
#endif

    EOpSubgroupGuardStop,

#ifdef AMD_EXTENSIONS
    EOpMinInvocations,
    EOpMaxInvocations,
    EOpAddInvocations,
    EOpMinInvocationsNonUniform,
    EOpMaxInvocationsNonUniform,
    EOpAddInvocationsNonUniform,
    EOpMinInvocationsInclusiveScan,
    EOpMaxInvocationsInclusiveScan,
    EOpAddInvocationsInclusiveScan,
    EOpMinInvocationsInclusiveScanNonUniform,
    EOpMaxInvocationsInclusiveScanNonUniform,
    EOpAddInvocationsInclusiveScanNonUniform,
    EOpMinInvocationsExclusiveScan,
    EOpMaxInvocationsExclusiveScan,
    EOpAddInvocationsExclusiveScan,
    EOpMinInvocationsExclusiveScanNonUniform,
    EOpMaxInvocationsExclusiveScanNonUniform,
    EOpAddInvocationsExclusiveScanNonUniform,
    EOpSwizzleInvocations,
    EOpSwizzleInvocationsMasked,
    EOpWriteInvocation,
    EOpMbcnt,

    EOpCubeFaceIndex,
    EOpCubeFaceCoord,
    EOpTime,
#endif

    EOpAtomicAdd,
    EOpAtomicMin,
    EOpAtomicMax,
    EOpAtomicAnd,
    EOpAtomicOr,
    EOpAtomicXor,
    EOpAtomicExchange,
    EOpAtomicCompSwap,
    EOpAtomicLoad,
    EOpAtomicStore,

    EOpAtomicCounterIncrement, // results in pre-increment value
    EOpAtomicCounterDecrement, // results in post-decrement value
    EOpAtomicCounter,
    EOpAtomicCounterAdd,
    EOpAtomicCounterSubtract,
    EOpAtomicCounterMin,
    EOpAtomicCounterMax,
    EOpAtomicCounterAnd,
    EOpAtomicCounterOr,
    EOpAtomicCounterXor,
    EOpAtomicCounterExchange,
    EOpAtomicCounterCompSwap,

    EOpAny,
    EOpAll,

    //
    // Branch
    //

    EOpKill,    // Fragment only
    EOpReturn,
    EOpBreak,
    EOpContinue,
    EOpCase,
    EOpDefault,

    //
    // Constructors
    //

    EOpConstructGuardStart,
    EOpConstructInt,    // these first scalar forms also identify what implicit conversion is needed
    EOpConstructUint,
    EOpConstructInt8,
    EOpConstructUint8,
    EOpConstructInt16,
    EOpConstructUint16,
    EOpConstructInt64,
    EOpConstructUint64,
    EOpConstructBool,
    EOpConstructFloat,
    EOpConstructDouble,
    EOpConstructVec2,
    EOpConstructVec3,
    EOpConstructVec4,
    EOpConstructDVec2,
    EOpConstructDVec3,
    EOpConstructDVec4,
    EOpConstructBVec2,
    EOpConstructBVec3,
    EOpConstructBVec4,
    EOpConstructI8Vec2,
    EOpConstructI8Vec3,
    EOpConstructI8Vec4,
    EOpConstructU8Vec2,
    EOpConstructU8Vec3,
    EOpConstructU8Vec4,
    EOpConstructI16Vec2,
    EOpConstructI16Vec3,
    EOpConstructI16Vec4,
    EOpConstructU16Vec2,
    EOpConstructU16Vec3,
    EOpConstructU16Vec4,
    EOpConstructIVec2,
    EOpConstructIVec3,
    EOpConstructIVec4,
    EOpConstructUVec2,
    EOpConstructUVec3,
    EOpConstructUVec4,
    EOpConstructI64Vec2,
    EOpConstructI64Vec3,
    EOpConstructI64Vec4,
    EOpConstructU64Vec2,
    EOpConstructU64Vec3,
    EOpConstructU64Vec4,
    EOpConstructMat2x2,
    EOpConstructMat2x3,
    EOpConstructMat2x4,
    EOpConstructMat3x2,
    EOpConstructMat3x3,
    EOpConstructMat3x4,
    EOpConstructMat4x2,
    EOpConstructMat4x3,
    EOpConstructMat4x4,
    EOpConstructDMat2x2,
    EOpConstructDMat2x3,
    EOpConstructDMat2x4,
    EOpConstructDMat3x2,
    EOpConstructDMat3x3,
    EOpConstructDMat3x4,
    EOpConstructDMat4x2,
    EOpConstructDMat4x3,
    EOpConstructDMat4x4,
    EOpConstructIMat2x2,
    EOpConstructIMat2x3,
    EOpConstructIMat2x4,
    EOpConstructIMat3x2,
    EOpConstructIMat3x3,
    EOpConstructIMat3x4,
    EOpConstructIMat4x2,
    EOpConstructIMat4x3,
    EOpConstructIMat4x4,
    EOpConstructUMat2x2,
    EOpConstructUMat2x3,
    EOpConstructUMat2x4,
    EOpConstructUMat3x2,
    EOpConstructUMat3x3,
    EOpConstructUMat3x4,
    EOpConstructUMat4x2,
    EOpConstructUMat4x3,
    EOpConstructUMat4x4,
    EOpConstructBMat2x2,
    EOpConstructBMat2x3,
    EOpConstructBMat2x4,
    EOpConstructBMat3x2,
    EOpConstructBMat3x3,
    EOpConstructBMat3x4,
    EOpConstructBMat4x2,
    EOpConstructBMat4x3,
    EOpConstructBMat4x4,
    EOpConstructFloat16,
    EOpConstructF16Vec2,
    EOpConstructF16Vec3,
    EOpConstructF16Vec4,
    EOpConstructF16Mat2x2,
    EOpConstructF16Mat2x3,
    EOpConstructF16Mat2x4,
    EOpConstructF16Mat3x2,
    EOpConstructF16Mat3x3,
    EOpConstructF16Mat3x4,
    EOpConstructF16Mat4x2,
    EOpConstructF16Mat4x3,
    EOpConstructF16Mat4x4,
    EOpConstructStruct,
    EOpConstructTextureSampler,
    EOpConstructNonuniform,     // expected to be transformed away, not present in final AST
    EOpConstructReference,
    EOpConstructGuardEnd,

    //
    // moves
    //

    EOpAssign,
    EOpAddAssign,
    EOpSubAssign,
    EOpMulAssign,
    EOpVectorTimesMatrixAssign,
    EOpVectorTimesScalarAssign,
    EOpMatrixTimesScalarAssign,
    EOpMatrixTimesMatrixAssign,
    EOpDivAssign,
    EOpModAssign,
    EOpAndAssign,
    EOpInclusiveOrAssign,
    EOpExclusiveOrAssign,
    EOpLeftShiftAssign,
    EOpRightShiftAssign,

    //
    // Array operators
    //

    // Can apply to arrays, vectors, or matrices.
    // Can be decomposed to a constant at compile time, but this does not always happen,
    // due to link-time effects. So, the consumer can expect either a link-time sized or
    // run-time sized array.
    EOpArrayLength,

    //
    // Image operations
    //

    EOpImageGuardBegin,

    EOpImageQuerySize,
    EOpImageQuerySamples,
    EOpImageLoad,
    EOpImageStore,
#ifdef AMD_EXTENSIONS
    EOpImageLoadLod,
    EOpImageStoreLod,
#endif
    EOpImageAtomicAdd,
    EOpImageAtomicMin,
    EOpImageAtomicMax,
    EOpImageAtomicAnd,
    EOpImageAtomicOr,
    EOpImageAtomicXor,
    EOpImageAtomicExchange,
    EOpImageAtomicCompSwap,
    EOpImageAtomicLoad,
    EOpImageAtomicStore,

    EOpSubpassLoad,
    EOpSubpassLoadMS,
    EOpSparseImageLoad,
#ifdef AMD_EXTENSIONS
    EOpSparseImageLoadLod,
#endif

    EOpImageGuardEnd,

    //
    // Texture operations
    //

    EOpTextureGuardBegin,

    EOpTextureQuerySize,
    EOpTextureQueryLod,
    EOpTextureQueryLevels,
    EOpTextureQuerySamples,

    EOpSamplingGuardBegin,

    EOpTexture,
    EOpTextureProj,
    EOpTextureLod,
    EOpTextureOffset,
    EOpTextureFetch,
    EOpTextureFetchOffset,
    EOpTextureProjOffset,
    EOpTextureLodOffset,
    EOpTextureProjLod,
    EOpTextureProjLodOffset,
    EOpTextureGrad,
    EOpTextureGradOffset,
    EOpTextureProjGrad,
    EOpTextureProjGradOffset,
    EOpTextureGather,
    EOpTextureGatherOffset,
    EOpTextureGatherOffsets,
    EOpTextureClamp,
    EOpTextureOffsetClamp,
    EOpTextureGradClamp,
    EOpTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
    EOpTextureGatherLod,
    EOpTextureGatherLodOffset,
    EOpTextureGatherLodOffsets,
    EOpFragmentMaskFetch,
    EOpFragmentFetch,
#endif

    EOpSparseTextureGuardBegin,

    EOpSparseTexture,
    EOpSparseTextureLod,
    EOpSparseTextureOffset,
    EOpSparseTextureFetch,
    EOpSparseTextureFetchOffset,
    EOpSparseTextureLodOffset,
    EOpSparseTextureGrad,
    EOpSparseTextureGradOffset,
    EOpSparseTextureGather,
    EOpSparseTextureGatherOffset,
    EOpSparseTextureGatherOffsets,
    EOpSparseTexelsResident,
    EOpSparseTextureClamp,
    EOpSparseTextureOffsetClamp,
    EOpSparseTextureGradClamp,
    EOpSparseTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
    EOpSparseTextureGatherLod,
    EOpSparseTextureGatherLodOffset,
    EOpSparseTextureGatherLodOffsets,
#endif

    EOpSparseTextureGuardEnd,

#ifdef NV_EXTENSIONS
    EOpImageFootprintGuardBegin,
    EOpImageSampleFootprintNV,
    EOpImageSampleFootprintClampNV,
    EOpImageSampleFootprintLodNV,
    EOpImageSampleFootprintGradNV,
    EOpImageSampleFootprintGradClampNV,
    EOpImageFootprintGuardEnd,
#endif
    EOpSamplingGuardEnd,
    EOpTextureGuardEnd,

    //
    // Integer operations
    //

    EOpAddCarry,
    EOpSubBorrow,
    EOpUMulExtended,
    EOpIMulExtended,
    EOpBitfieldExtract,
    EOpBitfieldInsert,
    EOpBitFieldReverse,
    EOpBitCount,
    EOpFindLSB,
    EOpFindMSB,

#ifdef NV_EXTENSIONS
    EOpTraceNV,
    EOpReportIntersectionNV,
    EOpIgnoreIntersectionNV,
    EOpTerminateRayNV,
    EOpExecuteCallableNV,
    EOpWritePackedPrimitiveIndices4x8NV,
#endif
    //
    // HLSL operations
    //

    EOpClip,                // discard if input value < 0
    EOpIsFinite,
    EOpLog10,               // base 10 log
    EOpRcp,                 // 1/x
    EOpSaturate,            // clamp from 0 to 1
    EOpSinCos,              // sin and cos in out parameters
    EOpGenMul,              // mul(x,y) on any of mat/vec/scalars
    EOpDst,                 // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
    EOpInterlockedAdd,      // atomic ops, but uses [optional] out arg instead of return
    EOpInterlockedAnd,      // ...
    EOpInterlockedCompareExchange, // ...
    EOpInterlockedCompareStore,    // ...
    EOpInterlockedExchange, // ...
    EOpInterlockedMax,      // ...
    EOpInterlockedMin,      // ...
    EOpInterlockedOr,       // ...
    EOpInterlockedXor,      // ...
    EOpAllMemoryBarrierWithGroupSync,       // memory barriers without non-hlsl AST equivalents
    EOpDeviceMemoryBarrier,                 // ...
    EOpDeviceMemoryBarrierWithGroupSync,    // ...
    EOpWorkgroupMemoryBarrier,              // ...
    EOpWorkgroupMemoryBarrierWithGroupSync, // ...
    EOpEvaluateAttributeSnapped,            // InterpolateAtOffset with int position on 16x16 grid
    EOpF32tof16,            // HLSL conversion: half of a PackHalf2x16
    EOpF16tof32,            // HLSL conversion: half of an UnpackHalf2x16
    EOpLit,                 // HLSL lighting coefficient vector
    EOpTextureBias,         // HLSL texture bias: will be lowered to EOpTexture
    EOpAsDouble,            // slightly different from EOpUint64BitsToDouble
    EOpD3DCOLORtoUBYTE4,    // convert and swizzle 4-component color to UBYTE4 range

    EOpMethodSample,                // Texture object methods. These are translated to existing
    EOpMethodSampleBias,            // AST methods, and exist to represent HLSL semantics until that
    EOpMethodSampleCmp,             // translation is performed. See HlslParseContext::decomposeSampleMethods().
    EOpMethodSampleCmpLevelZero,    // ...
    EOpMethodSampleGrad,            // ...
    EOpMethodSampleLevel,           // ...
    EOpMethodLoad,                  // ...
    EOpMethodGetDimensions,         // ...
    EOpMethodGetSamplePosition,     // ...
    EOpMethodGather,                // ...
    EOpMethodCalculateLevelOfDetail,          // ...
    EOpMethodCalculateLevelOfDetailUnclamped, // ...

    // Load already defined above for textures
    EOpMethodLoad2,                 // Structure buffer object methods. These are translated to existing
    EOpMethodLoad3,                 // AST methods, and exist to represent HLSL semantics until that
    EOpMethodLoad4,                 // translation is performed. See HlslParseContext::decomposeSampleMethods().
    EOpMethodStore,                 // ...
    EOpMethodStore2,                // ...
    EOpMethodStore3,                // ...
    EOpMethodStore4,                // ...
    EOpMethodIncrementCounter,      // ...
    EOpMethodDecrementCounter,      // ...
    // EOpMethodAppend is defined for geo shaders below
    EOpMethodConsume,

    // SM5 texture methods
    EOpMethodGatherRed,             // These are covered under the above EOpMethodSample comment about
    EOpMethodGatherGreen,           // translation to existing AST opcodes. They exist temporarily
    EOpMethodGatherBlue,            // because HLSL arguments are slightly different.
    EOpMethodGatherAlpha,           // ...
    EOpMethodGatherCmp,             // ...
    EOpMethodGatherCmpRed,          // ...
    EOpMethodGatherCmpGreen,        // ...
    EOpMethodGatherCmpBlue,         // ...
    EOpMethodGatherCmpAlpha,        // ...

    // geometry methods
    EOpMethodAppend,                // Geometry shader methods
    EOpMethodRestartStrip,          // ...

    // matrix
    EOpMatrixSwizzle,               // select multiple matrix components (non-column)

    // SM6 wave ops
    EOpWaveGetLaneCount,            // Will decompose to gl_SubgroupSize.
    EOpWaveGetLaneIndex,            // Will decompose to gl_SubgroupInvocationID.
    EOpWaveActiveCountBits,         // Will decompose to subgroupBallotBitCount(subgroupBallot()).
    EOpWavePrefixCountBits,         // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
};

class TIntermTraverser;
class TIntermOperator;
class TIntermAggregate;
class TIntermUnary;
class TIntermBinary;
class TIntermConstantUnion;
class TIntermSelection;
class TIntermSwitch;
class TIntermBranch;
class TIntermTyped;
class TIntermMethod;
class TIntermSymbol;
class TIntermLoop;

} // end namespace glslang

//
// Base class for the tree nodes
//
// (Put outside the glslang namespace, as it's used as part of the external interface.)
//
class TIntermNode {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())

    TIntermNode() { loc.init(); }
    virtual const glslang::TSourceLoc& getLoc() const { return loc; }
    virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
    virtual void traverse(glslang::TIntermTraverser*) = 0;
    virtual glslang::TIntermTyped* getAsTyped() { return 0; }
    virtual glslang::TIntermOperator* getAsOperator() { return 0; }
    virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; }
    virtual glslang::TIntermAggregate* getAsAggregate() { return 0; }
    virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; }
    virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; }
    virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; }
    virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; }
    virtual glslang::TIntermMethod* getAsMethodNode() { return 0; }
    virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; }
    virtual glslang::TIntermBranch* getAsBranchNode() { return 0; }
    virtual glslang::TIntermLoop* getAsLoopNode() { return 0; }

    virtual const glslang::TIntermTyped* getAsTyped() const { return 0; }
    virtual const glslang::TIntermOperator* getAsOperator() const { return 0; }
    virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
    virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; }
    virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; }
    virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; }
    virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; }
    virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; }
    virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; }
    virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; }
    virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; }
    virtual const glslang::TIntermLoop* getAsLoopNode() const { return 0; }
    virtual ~TIntermNode() { }

protected:
    TIntermNode(const TIntermNode&);
    TIntermNode& operator=(const TIntermNode&);
    glslang::TSourceLoc loc;
};
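
//
// Example (an illustrative sketch, not part of the glslang API): the getAs*()
// virtuals above are a lightweight substitute for dynamic_cast. Consumers ask
// for the derived view they want and test for null. 'describe', 'handleBinary',
// and 'handleSymbol' are hypothetical helpers; only the getAs*() calls come
// from this header.
//
//     void describe(TIntermNode* node)
//     {
//         if (glslang::TIntermBinary* bin = node->getAsBinaryNode())
//             handleBinary(bin);    // node holds a binary operator
//         else if (node->getAsSymbolNode())
//             handleSymbol();       // node holds a symbol reference
//     }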

namespace glslang {

//
// This is just to help yacc.
//
struct TIntermNodePair {
    TIntermNode* node1;
    TIntermNode* node2;
};

//
// Intermediate class for nodes that have a type.
//
class TIntermTyped : public TIntermNode {
public:
    TIntermTyped(const TType& t) { type.shallowCopy(t); }
    TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
    virtual TIntermTyped* getAsTyped() { return this; }
    virtual const TIntermTyped* getAsTyped() const { return this; }
    virtual void setType(const TType& t) { type.shallowCopy(t); }
    virtual const TType& getType() const { return type; }
    virtual TType& getWritableType() { return type; }

    virtual TBasicType getBasicType() const { return type.getBasicType(); }
    virtual TQualifier& getQualifier() { return type.getQualifier(); }
    virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
    virtual void propagatePrecision(TPrecisionQualifier);
    virtual int getVectorSize() const { return type.getVectorSize(); }
    virtual int getMatrixCols() const { return type.getMatrixCols(); }
    virtual int getMatrixRows() const { return type.getMatrixRows(); }
    virtual bool isMatrix() const { return type.isMatrix(); }
    virtual bool isArray() const { return type.isArray(); }
    virtual bool isVector() const { return type.isVector(); }
    virtual bool isScalar() const { return type.isScalar(); }
    virtual bool isStruct() const { return type.isStruct(); }
    virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
    virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
    TString getCompleteString() const { return type.getCompleteString(); }

protected:
    TIntermTyped& operator=(const TIntermTyped&);
    TType type;
};

//
// Handle for, do-while, and while loops.
//
class TIntermLoop : public TIntermNode {
public:
    TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
        body(aBody),
        test(aTest),
        terminal(aTerminal),
        first(testFirst),
        unroll(false),
        dontUnroll(false),
        dependency(0)
    { }

    virtual TIntermLoop* getAsLoopNode() { return this; }
    virtual const TIntermLoop* getAsLoopNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    TIntermNode* getBody() const { return body; }
    TIntermTyped* getTest() const { return test; }
    TIntermTyped* getTerminal() const { return terminal; }
    bool testFirst() const { return first; }

    void setUnroll() { unroll = true; }
    void setDontUnroll() { dontUnroll = true; }
    bool getUnroll() const { return unroll; }
    bool getDontUnroll() const { return dontUnroll; }

    static const unsigned int dependencyInfinite = 0xFFFFFFFF;
    void setLoopDependency(int d) { dependency = d; }
    int getLoopDependency() const { return dependency; }

protected:
    TIntermNode* body;       // code to loop over
    TIntermTyped* test;      // exit condition associated with loop, could be 0 for 'for' loops
    TIntermTyped* terminal;  // exists for for-loops
    bool first;              // true for while and for, not for do-while
    bool unroll;             // true if unroll requested
    bool dontUnroll;         // true if request to not unroll
    unsigned int dependency; // loop dependency hint; 0 means not set or unknown
};
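
//
// Example (illustrative, describing the usual front-end construction rather
// than an API guarantee): a GLSL loop 'for (int i = 0; i < n; ++i) work();'
// maps onto TIntermLoop roughly as
//
//     body     = subtree for 'work();'
//     test     = subtree for 'i < n'
//     terminal = subtree for '++i'
//     first    = true  (test runs before the body; do-while builds with false)
//
// with the 'int i = 0' initializer typically emitted as a sibling node that
// precedes the loop in the enclosing sequence.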

//
// Handle case, break, continue, return, and kill.
//
class TIntermBranch : public TIntermNode {
public:
    TIntermBranch(TOperator op, TIntermTyped* e) :
        flowOp(op),
        expression(e) { }
    virtual TIntermBranch* getAsBranchNode() { return this; }
    virtual const TIntermBranch* getAsBranchNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    TOperator getFlowOp() const { return flowOp; }
    TIntermTyped* getExpression() const { return expression; }
protected:
    TOperator flowOp;
    TIntermTyped* expression;
};

//
// Represent method names before seeing their calling signature
// or resolving them to operations. Just an expression as the base object
// and a textual name.
//
class TIntermMethod : public TIntermTyped {
public:
    TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
    virtual TIntermMethod* getAsMethodNode() { return this; }
    virtual const TIntermMethod* getAsMethodNode() const { return this; }
    virtual const TString& getMethodName() const { return method; }
    virtual TIntermTyped* getObject() const { return object; }
    virtual void traverse(TIntermTraverser*);
protected:
    TIntermTyped* object;
    TString method;
};

//
// Nodes that correspond to symbols or constants in the source code.
//
class TIntermSymbol : public TIntermTyped {
public:
    // If the symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym.
    // If sym comes from the per-process threadPoolAllocator, that causes increased memory usage
    // per compile, so it is essential to use "symbol = sym" to assign to symbol.
    TIntermSymbol(int i, const TString& n, const TType& t)
        : TIntermTyped(t), id(i),
#ifdef ENABLE_HLSL
        flattenSubset(-1),
#endif
        constSubtree(nullptr)
    { name = n; }
    virtual int getId() const { return id; }
    virtual void changeId(int i) { id = i; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual TIntermSymbol* getAsSymbolNode() { return this; }
    virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
    void setConstArray(const TConstUnionArray& c) { constArray = c; }
    const TConstUnionArray& getConstArray() const { return constArray; }
    void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
    TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifdef ENABLE_HLSL
    void setFlattenSubset(int subset) { flattenSubset = subset; }
    int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif

    // This is meant for cases where a node has already been constructed, and
    // later on, it becomes necessary to switch to a different symbol.
    virtual void switchId(int newId) { id = newId; }

protected:
    int id;                      // the unique id of the symbol this node represents
#ifdef ENABLE_HLSL
    int flattenSubset;           // how deeply the flattened object rooted at id has been dereferenced
#endif
    TString name;                // the name of the symbol this node represents
    TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
    TIntermTyped* constSubtree;
};

class TIntermConstantUnion : public TIntermTyped {
public:
    TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
    const TConstUnionArray& getConstArray() const { return constArray; }
    virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
    virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
    virtual TIntermTyped* fold(TOperator, const TType&) const;
    void setLiteral() { literal = true; }
    void setExpression() { literal = false; }
    bool isLiteral() const { return literal; }

protected:
    TIntermConstantUnion& operator=(const TIntermConstantUnion&);

    const TConstUnionArray constArray;
    bool literal; // true if node represents a literal in the source code
};

// Represent the independent aspects of a texturing TOperator
struct TCrackedTextureOp {
    bool query;
    bool proj;
    bool lod;
    bool fetch;
    bool offset;
    bool offsets;
    bool gather;
    bool grad;
    bool subpass;
    bool lodClamp;
#ifdef AMD_EXTENSIONS
    bool fragMask;
#endif
};

//
// Intermediate class for node types that hold operators.
//
class TIntermOperator : public TIntermTyped {
public:
    virtual TIntermOperator* getAsOperator() { return this; }
    virtual const TIntermOperator* getAsOperator() const { return this; }
    TOperator getOp() const { return op; }
    void setOp(TOperator newOp) { op = newOp; }
    bool modifiesState() const;
    bool isConstructor() const;
    bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
    bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
    bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
    bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
#ifdef NV_EXTENSIONS
    bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
#endif
    bool isSparseImage() const { return op == EOpSparseImageLoad; }

    void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
    TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
                                                               operationPrecision :
                                                               type.getQualifier().precision; }
    TString getCompleteString() const
    {
        TString cs = type.getCompleteString();
        if (getOperationPrecision() != type.getQualifier().precision) {
            cs += ", operation at ";
            cs += GetPrecisionQualifierString(getOperationPrecision());
        }

        return cs;
    }

    // Crack the op into the individual dimensions of texturing operation.
    void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
    {
        cracked.query = false;
        cracked.proj = false;
        cracked.lod = false;
        cracked.fetch = false;
        cracked.offset = false;
        cracked.offsets = false;
        cracked.gather = false;
        cracked.grad = false;
        cracked.subpass = false;
        cracked.lodClamp = false;
#ifdef AMD_EXTENSIONS
        cracked.fragMask = false;
#endif

        switch (op) {
        case EOpImageQuerySize:
        case EOpImageQuerySamples:
        case EOpTextureQuerySize:
        case EOpTextureQueryLod:
        case EOpTextureQueryLevels:
        case EOpTextureQuerySamples:
        case EOpSparseTexelsResident:
            cracked.query = true;
            break;
        case EOpTexture:
        case EOpSparseTexture:
            break;
        case EOpTextureClamp:
        case EOpSparseTextureClamp:
            cracked.lodClamp = true;
            break;
        case EOpTextureProj:
            cracked.proj = true;
            break;
        case EOpTextureLod:
        case EOpSparseTextureLod:
            cracked.lod = true;
            break;
        case EOpTextureOffset:
        case EOpSparseTextureOffset:
            cracked.offset = true;
            break;
        case EOpTextureOffsetClamp:
        case EOpSparseTextureOffsetClamp:
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureFetch:
        case EOpSparseTextureFetch:
            cracked.fetch = true;
            if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureFetchOffset:
        case EOpSparseTextureFetchOffset:
            cracked.fetch = true;
            cracked.offset = true;
            if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureProjOffset:
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureLodOffset:
        case EOpSparseTextureLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureProjLod:
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureProjLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureGrad:
        case EOpSparseTextureGrad:
            cracked.grad = true;
            break;
        case EOpTextureGradClamp:
        case EOpSparseTextureGradClamp:
            cracked.grad = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradOffset:
        case EOpSparseTextureGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            break;
        case EOpTextureProjGrad:
            cracked.grad = true;
            cracked.proj = true;
            break;
        case EOpTextureProjGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureGradOffsetClamp:
        case EOpSparseTextureGradOffsetClamp:
            cracked.grad = true;
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGather:
        case EOpSparseTextureGather:
            cracked.gather = true;
            break;
        case EOpTextureGatherOffset:
        case EOpSparseTextureGatherOffset:
            cracked.gather = true;
            cracked.offset = true;
            break;
        case EOpTextureGatherOffsets:
        case EOpSparseTextureGatherOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            break;
#ifdef AMD_EXTENSIONS
        case EOpTextureGatherLod:
        case EOpSparseTextureGatherLod:
            cracked.gather = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffset:
        case EOpSparseTextureGatherLodOffset:
            cracked.gather = true;
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffsets:
        case EOpSparseTextureGatherLodOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            cracked.lod = true;
            break;
        case EOpImageLoadLod:
        case EOpImageStoreLod:
        case EOpSparseImageLoadLod:
            cracked.lod = true;
            break;
        case EOpFragmentMaskFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpFragmentFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
#endif
#ifdef NV_EXTENSIONS
        case EOpImageSampleFootprintNV:
            break;
        case EOpImageSampleFootprintClampNV:
            cracked.lodClamp = true;
            break;
        case EOpImageSampleFootprintLodNV:
            cracked.lod = true;
            break;
        case EOpImageSampleFootprintGradNV:
            cracked.grad = true;
            break;
        case EOpImageSampleFootprintGradClampNV:
            cracked.lodClamp = true;
            cracked.grad = true;
            break;
#endif
        case EOpSubpassLoad:
        case EOpSubpassLoadMS:
            cracked.subpass = true;
            break;
        default:
            break;
        }
    }
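
    // Example (illustrative, not part of the glslang API): cracking
    // EOpTextureProjLodOffset sets cracked.proj, cracked.lod, and
    // cracked.offset, leaving the other flags false, so one consumer code
    // path can handle every texturing variant instead of switching per
    // opcode. 'node' and 'sampler' are assumed to be in scope.
    //
    //     TCrackedTextureOp cracked;
    //     node->crackTexture(sampler, cracked);
    //     if (cracked.proj) { /* divide the coordinate through by q */ }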

protected:
    TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
    TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
    TOperator op;
    // The result precision is in the inherited TType, and is usually meant to be both
    // the operation precision and the result precision. However, some more complex things,
    // like built-in function calls, distinguish between the two, in which case a non-EpqNone
    // 'operationPrecision' overrides the result precision as far as operation precision
    // is concerned.
    TPrecisionQualifier operationPrecision;
};

//
// Nodes for all the basic binary math operators.
//
class TIntermBinary : public TIntermOperator {
public:
    TIntermBinary(TOperator o) : TIntermOperator(o) {}
    virtual void traverse(TIntermTraverser*);
    virtual void setLeft(TIntermTyped* n) { left = n; }
    virtual void setRight(TIntermTyped* n) { right = n; }
    virtual TIntermTyped* getLeft() const { return left; }
    virtual TIntermTyped* getRight() const { return right; }
    virtual TIntermBinary* getAsBinaryNode() { return this; }
    virtual const TIntermBinary* getAsBinaryNode() const { return this; }
    virtual void updatePrecision();
protected:
    TIntermTyped* left;
    TIntermTyped* right;
};
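
//
// Example (an illustrative sketch; the front end normally builds these through
// TIntermediate factory methods such as addBinaryMath rather than by hand):
// the expression 'a + b' becomes a TIntermBinary with op EOpAdd whose children
// are the operand subtrees. 'aNode' and 'bNode' are assumed TIntermTyped*.
//
//     TIntermBinary* add = new TIntermBinary(EOpAdd); // pool-allocated via
//                                                     // POOL_ALLOCATOR_NEW_DELETE
//     add->setLeft(aNode);
//     add->setRight(bNode);
//     add->setType(aNode->getType()); // result type, simplified; real code
//                                     // also handles conversions and folding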

//
// Nodes for unary math operators.
//
class TIntermUnary : public TIntermOperator {
public:
    TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {}
    TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {}
    virtual void traverse(TIntermTraverser*);
    virtual void setOperand(TIntermTyped* o) { operand = o; }
    virtual TIntermTyped* getOperand() { return operand; }
    virtual const TIntermTyped* getOperand() const { return operand; }
    virtual TIntermUnary* getAsUnaryNode() { return this; }
    virtual const TIntermUnary* getAsUnaryNode() const { return this; }
    virtual void updatePrecision();
protected:
    TIntermTyped* operand;
};

typedef TVector<TIntermNode*> TIntermSequence;
typedef TVector<TStorageQualifier> TQualifierList;
//
// Nodes that operate on an arbitrarily sized set of children.
//
class TIntermAggregate : public TIntermOperator {
public:
    TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
    TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
    ~TIntermAggregate() { delete pragmaTable; }
    virtual TIntermAggregate* getAsAggregate() { return this; }
    virtual const TIntermAggregate* getAsAggregate() const { return this; }
    virtual void setOperator(TOperator o) { op = o; }
    virtual TIntermSequence& getSequence() { return sequence; }
    virtual const TIntermSequence& getSequence() const { return sequence; }
    virtual void setName(const TString& n) { name = n; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual void setUserDefined() { userDefined = true; }
    virtual bool isUserDefined() { return userDefined; }
    virtual TQualifierList& getQualifierList() { return qualifier; }
    virtual const TQualifierList& getQualifierList() const { return qualifier; }
    void setOptimize(bool o) { optimize = o; }
    void setDebug(bool d) { debug = d; }
    bool getOptimize() const { return optimize; }
    bool getDebug() const { return debug; }
    void setPragmaTable(const TPragmaTable& pTable);
    const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
protected:
    TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
    TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
    TIntermSequence sequence;
    TQualifierList qualifier;
    TString name;
    bool userDefined; // used for user defined function names
    bool optimize;
    bool debug;
    TPragmaTable* pragmaTable;
};

//
// For if tests.
//
class TIntermSelection : public TIntermTyped {
public:
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
        TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
        TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* getCondition() const { return condition; }
    virtual TIntermNode* getTrueBlock() const { return trueBlock; }
    virtual TIntermNode* getFalseBlock() const { return falseBlock; }
    virtual TIntermSelection* getAsSelectionNode() { return this; }
    virtual const TIntermSelection* getAsSelectionNode() const { return this; }

    void setNoShortCircuit() { shortCircuit = false; }
    bool getShortCircuit() const { return shortCircuit; }

    void setFlatten() { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten() const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermNode* trueBlock;
    TIntermNode* falseBlock;
    bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
    bool flatten;      // true if flatten requested
    bool dontFlatten;  // true if requested to not flatten
};

//
// For switch statements. Designed use is that a switch will have a sequence of nodes
// that are either case/default nodes or a *single* node that represents all the code
// in between (if any) consecutive case/defaults. So, a traversal need only deal with
// 0 or 1 nodes per case/default statement.
//
class TIntermSwitch : public TIntermNode {
public:
    TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermNode* getCondition() const { return condition; }
    virtual TIntermAggregate* getBody() const { return body; }
    virtual TIntermSwitch* getAsSwitchNode() { return this; }
    virtual const TIntermSwitch* getAsSwitchNode() const { return this; }

    void setFlatten() { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten() const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermAggregate* body;
    bool flatten;     // true if flatten requested
    bool dontFlatten; // true if requested to not flatten
};
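
//
// Example (illustrative): for the statement
//
//     switch (x) { case 0: a(); b(); break; default: c(); }
//
// the body sequence holds a case node, one aggregate for 'a(); b(); break;',
// a default node, and one node for 'c();'. There is never more than one node
// between consecutive case/default labels.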

enum TVisit
{
    EvPreVisit,
    EvInVisit,
    EvPostVisit
};

//
// For traversing the tree. Users should derive from this class,
// put their traversal-specific data in it, and then pass
// it to a Traverse method.
//
// When using this, just fill in the methods for nodes you want visited.
// Return false from a pre-visit to skip visiting that node's subtree.
//
// Explicitly set postVisit to true if you want post visiting; otherwise,
// filled-in methods will only be called at pre-visit time (before processing
// the subtree). Similarly for inVisit, for in-order visiting of nodes with
// multiple children.
//
// If you only want post-visits, explicitly turn off preVisit (and inVisit)
// and turn on postVisit.
//
// In general, for the visit*() methods, return true from interior nodes
// to have the traversal continue on to children.
//
// If you process children yourself, or don't want them processed, return false.
//
class TIntermTraverser {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
        preVisit(preVisit),
        inVisit(inVisit),
        postVisit(postVisit),
        rightToLeft(rightToLeft),
        depth(0),
        maxDepth(0) { }
    virtual ~TIntermTraverser() { }

    virtual void visitSymbol(TIntermSymbol*) { }
    virtual void visitConstantUnion(TIntermConstantUnion*) { }
    virtual bool visitBinary(TVisit, TIntermBinary*) { return true; }
    virtual bool visitUnary(TVisit, TIntermUnary*) { return true; }
    virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
    virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
    virtual bool visitLoop(TVisit, TIntermLoop*) { return true; }
    virtual bool visitBranch(TVisit, TIntermBranch*) { return true; }
    virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; }

    int getMaxDepth() const { return maxDepth; }

    void incrementDepth(TIntermNode* current)
    {
        depth++;
        maxDepth = (std::max)(maxDepth, depth);
        path.push_back(current);
    }

    void decrementDepth()
    {
        depth--;
        path.pop_back();
    }

    TIntermNode* getParentNode()
    {
        return path.size() == 0 ? NULL : path.back();
    }

    const bool preVisit;
    const bool inVisit;
    const bool postVisit;
    const bool rightToLeft;

protected:
    TIntermTraverser& operator=(TIntermTraverser&);

    int depth;
    int maxDepth;

    // All the nodes from root to the current node's parent during traversing.
    TVector<TIntermNode *> path;
};
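
//
// Example (a minimal sketch, not part of the glslang API): a traverser that
// counts symbol references. 'CountSymbols' and 'root' are hypothetical names;
// only TIntermTraverser, visitSymbol, and TIntermNode::traverse come from
// this header.
//
//     class CountSymbols : public TIntermTraverser {
//     public:
//         CountSymbols() : TIntermTraverser(true, false, false), count(0) { }
//         virtual void visitSymbol(TIntermSymbol*) { ++count; }
//         int count;
//     };
//
//     CountSymbols counter;
//     root->traverse(&counter); // afterwards, counter.count holds the total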

// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
// sized with the same symbol, involving no operations"
inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
{
    return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
           node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
}

} // end namespace glslang

#endif // __INTERMEDIATE_H
