Searched refs: _m1 (Results 1–25 of 59, sorted by relevance)
/third_party/skia/third_party/externals/spirv-cross/reference/opt/shaders/desktop-only/comp/
  extended-arithmetic.desktop.comp
    7: uint _m1;
    13: uvec2 _m1;
    19: uvec3 _m1;
    25: uvec4 _m1;
    31: int _m1;
    37: ivec2 _m1;
    43: ivec3 _m1;
    49: ivec4 _m1;
    95: _25._m0 = uaddCarry(u.a, u.b, _25._m1);
    96: u.d = _25._m1;
    [all …]

/third_party/skia/third_party/externals/spirv-cross/reference/shaders/desktop-only/comp/
  extended-arithmetic.desktop.comp
    7: uint _m1;
    13: uvec2 _m1;
    19: uvec3 _m1;
    25: uvec4 _m1;
    31: int _m1;
    37: ivec2 _m1;
    43: ivec3 _m1;
    49: ivec4 _m1;
    95: _25._m0 = uaddCarry(u.a, u.b, _25._m1);
    96: u.d = _25._m1;
    [all …]

/third_party/skia/third_party/externals/spirv-cross/reference/shaders-msl/desktop-only/comp/
  extended-arithmetic.desktop.comp
    29: uint _m1;
    35: uint2 _m1;
    41: uint3 _m1;
    47: uint4 _m1;
    73: int _m1;
    79: int2 _m1;
    85: int3 _m1;
    91: int4 _m1;
    100: _25._m1 = select(uint(1), uint(0), _25._m0 >= max(u.a, u.b));
    101: u.d = _25._m1;
    [all …]

/third_party/skia/third_party/externals/spirv-cross/reference/opt/shaders-msl/desktop-only/comp/
  extended-arithmetic.desktop.comp
    29: uint _m1;
    35: uint2 _m1;
    41: uint3 _m1;
    47: uint4 _m1;
    73: int _m1;
    79: int2 _m1;
    85: int3 _m1;
    91: int4 _m1;
    100: _25._m1 = select(uint(1), uint(0), _25._m0 >= max(u.a, u.b));
    101: u.d = _25._m1;
    [all …]

/third_party/skia/third_party/externals/spirv-cross/reference/shaders/asm/comp/
  bitcast_iadd.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    18: _6._m0 = _5._m1 + uvec4(_5._m0);
    19: _6._m0 = uvec4(_5._m0) + _5._m1;
    20: _6._m0 = _5._m1 + _5._m1;
    22: _6._m1 = ivec4(_5._m1 + _5._m1);
    23: _6._m1 = _5._m0 + _5._m0;
    24: _6._m1 = ivec4(_5._m1) + _5._m0;
    25: _6._m1 = _5._m0 + ivec4(_5._m1);

  multiple-entry.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    18: _9._m0 = _8._m1 + uvec4(_8._m0);
    19: _9._m0 = uvec4(_8._m0) + _8._m1;
    20: _9._m0 = _8._m1 + _8._m1;
    22: _9._m1 = ivec4(_8._m1 + _8._m1);
    23: _9._m1 = _8._m0 + _8._m0;
    24: _9._m1 = ivec4(_8._m1) + _8._m0;
    25: _9._m1 = _8._m0 + ivec4(_8._m1);

  bitcast_icmp.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    18: _6._m0 = uvec4(lessThan(ivec4(_5._m1), _5._m0));
    19: _6._m0 = uvec4(lessThanEqual(ivec4(_5._m1), _5._m0));
    20: _6._m0 = uvec4(lessThan(_5._m1, uvec4(_5._m0)));
    21: _6._m0 = uvec4(lessThanEqual(_5._m1, uvec4(_5._m0)));
    22: _6._m0 = uvec4(greaterThan(ivec4(_5._m1), _5._m0));
    23: _6._m0 = uvec4(greaterThanEqual(ivec4(_5._m1), _5._m0));
    24: _6._m0 = uvec4(greaterThan(_5._m1, uvec4(_5._m0)));
    25: _6._m0 = uvec4(greaterThanEqual(_5._m1, uvec4(_5._m0)));

  bitcast_slr.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    19: uvec4 _23 = _5._m1;
    24: _6._m1 = ivec4(_23 >> _23);
    25: _6._m1 = ivec4(uvec4(_22) >> uvec4(_22));
    26: _6._m1 = ivec4(_23 >> uvec4(_22));
    27: _6._m1 = ivec4(uvec4(_22) >> _23);

  bitcast_sdiv.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    19: uvec4 _23 = _5._m1;
    24: _6._m1 = ivec4(_23) / ivec4(_23);
    25: _6._m1 = _22 / _22;
    26: _6._m1 = ivec4(_23) / _22;
    27: _6._m1 = _22 / ivec4(_23);

/third_party/skia/third_party/externals/spirv-cross/reference/opt/shaders/asm/comp/
  bitcast_iadd.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    18: _6._m0 = _5._m1 + uvec4(_5._m0);
    19: _6._m0 = uvec4(_5._m0) + _5._m1;
    20: _6._m0 = _5._m1 + _5._m1;
    22: _6._m1 = ivec4(_5._m1 + _5._m1);
    23: _6._m1 = _5._m0 + _5._m0;
    24: _6._m1 = ivec4(_5._m1) + _5._m0;
    25: _6._m1 = _5._m0 + ivec4(_5._m1);

  multiple-entry.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    18: _9._m0 = _8._m1 + uvec4(_8._m0);
    19: _9._m0 = uvec4(_8._m0) + _8._m1;
    20: _9._m0 = _8._m1 + _8._m1;
    22: _9._m1 = ivec4(_8._m1 + _8._m1);
    23: _9._m1 = _8._m0 + _8._m0;
    24: _9._m1 = ivec4(_8._m1) + _8._m0;
    25: _9._m1 = _8._m0 + ivec4(_8._m1);

  bitcast_icmp.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    18: _6._m0 = uvec4(lessThan(ivec4(_5._m1), _5._m0));
    19: _6._m0 = uvec4(lessThanEqual(ivec4(_5._m1), _5._m0));
    20: _6._m0 = uvec4(lessThan(_5._m1, uvec4(_5._m0)));
    21: _6._m0 = uvec4(lessThanEqual(_5._m1, uvec4(_5._m0)));
    22: _6._m0 = uvec4(greaterThan(ivec4(_5._m1), _5._m0));
    23: _6._m0 = uvec4(greaterThanEqual(ivec4(_5._m1), _5._m0));
    24: _6._m0 = uvec4(greaterThan(_5._m1, uvec4(_5._m0)));
    25: _6._m0 = uvec4(greaterThanEqual(_5._m1, uvec4(_5._m0)));

  bitcast_sar.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    19: uvec4 _23 = _5._m1;
    24: _6._m1 = ivec4(_23) >> ivec4(_23);
    25: _6._m1 = _22 >> _22;
    26: _6._m1 = ivec4(_23) >> _22;
    27: _6._m1 = _22 >> ivec4(_23);

  bitcast_sdiv.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    19: uvec4 _23 = _5._m1;
    24: _6._m1 = ivec4(_23) / ivec4(_23);
    25: _6._m1 = _22 / _22;
    26: _6._m1 = ivec4(_23) / _22;
    27: _6._m1 = _22 / ivec4(_23);

  bitcast_slr.asm.comp
    7: uvec4 _m1;
    13: ivec4 _m1;
    19: uvec4 _23 = _5._m1;
    24: _6._m1 = ivec4(_23 >> _23);
    25: _6._m1 = ivec4(uvec4(_22) >> uvec4(_22));
    26: _6._m1 = ivec4(_23 >> uvec4(_22));
    27: _6._m1 = ivec4(uvec4(_22) >> _23);

/third_party/skia/third_party/externals/spirv-cross/reference/shaders-msl/asm/comp/
  multiple-entry.asm.comp
    9: uint4 _m1;
    15: int4 _m1;
    20: _9._m0 = _8._m1 + uint4(_8._m0);
    21: _9._m0 = uint4(_8._m0) + _8._m1;
    22: _9._m0 = _8._m1 + _8._m1;
    24: _9._m1 = int4(_8._m1 + _8._m1);
    25: _9._m1 = _8._m0 + _8._m0;
    26: _9._m1 = int4(_8._m1) + _8._m0;
    27: _9._m1 = _8._m0 + int4(_8._m1);

  bitcast_iadd.asm.comp
    9: uint4 _m1;
    15: int4 _m1;
    20: _6._m0 = _5._m1 + uint4(_5._m0);
    21: _6._m0 = uint4(_5._m0) + _5._m1;
    22: _6._m0 = _5._m1 + _5._m1;
    24: _6._m1 = int4(_5._m1 + _5._m1);
    25: _6._m1 = _5._m0 + _5._m0;
    26: _6._m1 = int4(_5._m1) + _5._m0;
    27: _6._m1 = _5._m0 + int4(_5._m1);

  bitcast_icmp.asm.comp
    9: uint4 _m1;
    15: int4 _m1;
    20: _6._m0 = uint4(int4(_5._m1) < _5._m0);
    21: _6._m0 = uint4(int4(_5._m1) <= _5._m0);
    22: _6._m0 = uint4(_5._m1 < uint4(_5._m0));
    23: _6._m0 = uint4(_5._m1 <= uint4(_5._m0));
    24: _6._m0 = uint4(int4(_5._m1) > _5._m0);
    25: _6._m0 = uint4(int4(_5._m1) >= _5._m0);
    26: _6._m0 = uint4(_5._m1 > uint4(_5._m0));
    27: _6._m0 = uint4(_5._m1 >= uint4(_5._m0));

/third_party/skia/third_party/externals/spirv-cross/reference/opt/shaders-msl/asm/comp/
  multiple-entry.asm.comp
    9: uint4 _m1;
    15: int4 _m1;
    20: _9._m0 = _8._m1 + uint4(_8._m0);
    21: _9._m0 = uint4(_8._m0) + _8._m1;
    22: _9._m0 = _8._m1 + _8._m1;
    24: _9._m1 = int4(_8._m1 + _8._m1);
    25: _9._m1 = _8._m0 + _8._m0;
    26: _9._m1 = int4(_8._m1) + _8._m0;
    27: _9._m1 = _8._m0 + int4(_8._m1);

  bitcast_iadd.asm.comp
    9: uint4 _m1;
    15: int4 _m1;
    20: _6._m0 = _5._m1 + uint4(_5._m0);
    21: _6._m0 = uint4(_5._m0) + _5._m1;
    22: _6._m0 = _5._m1 + _5._m1;
    24: _6._m1 = int4(_5._m1 + _5._m1);
    25: _6._m1 = _5._m0 + _5._m0;
    26: _6._m1 = int4(_5._m1) + _5._m0;
    27: _6._m1 = _5._m0 + int4(_5._m1);

  bitcast_icmp.asm.comp
    9: uint4 _m1;
    15: int4 _m1;
    20: _6._m0 = uint4(int4(_5._m1) < _5._m0);
    21: _6._m0 = uint4(int4(_5._m1) <= _5._m0);
    22: _6._m0 = uint4(_5._m1 < uint4(_5._m0));
    23: _6._m0 = uint4(_5._m1 <= uint4(_5._m0));
    24: _6._m0 = uint4(int4(_5._m1) > _5._m0);
    25: _6._m0 = uint4(int4(_5._m1) >= _5._m0);
    26: _6._m0 = uint4(_5._m1 > uint4(_5._m0));
    27: _6._m0 = uint4(_5._m1 >= uint4(_5._m0));

/third_party/skia/third_party/externals/spirv-cross/reference/shaders-no-opt/frag/
  sparse-texture-feedback.desktop.frag
    8: vec4 _m1;
    24: vec4 texel = _24._m1;
    30: texel = _31._m1;
    36: texel = _38._m1;
    42: texel = _47._m1;
    48: texel = _56._m1;
    54: texel = _64._m1;
    60: texel = _76._m1;
    66: texel = _86._m1;
    72: texel = _93._m1;
    [all …]

/third_party/skia/third_party/externals/spirv-cross/reference/shaders-no-opt/asm/comp/
  glsl.std450.frexp-modf-struct.asm.comp
    7: float _m1;
    13: int _m1;
    19: int _m1;
    25: _23._m0 = modf(20.0, _23._m1);
    27: _24._m0 = frexp(40.0, _24._m1);
    29: _4._m0 = _23._m1;
    31: _4._m1 = _24._m1;

/third_party/skia/third_party/externals/spirv-cross/reference/shaders-msl-no-opt/asm/comp/
  glsl.std450.frexp-modf-struct.asm.comp
    9: float _m1;
    15: int _m1;
    21: int _m1;
    27: _23._m0 = modf(20.0, _23._m1);
    29: _24._m0 = frexp(40.0, _24._m1);
    31: _4._m0 = _23._m1;
    33: _4._m1 = _24._m1;

/third_party/skia/third_party/externals/spirv-cross/reference/shaders/frag/
  frexp-modf.frag
    8: int _m1;
    14: ivec2 _m1;
    24: _16._m0 = frexp(v0, _16._m1);
    25: mediump int e0 = _16._m1;
    28: _22._m0 = frexp(v0 + 1.0, _22._m1);
    29: e0 = _22._m1;
    32: _35._m0 = frexp(v1, _35._m1);
    33: mediump ivec2 e1 = _35._m1;

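Note on the identifier being searched: _m0, _m1, ... are the names SPIRV-Cross assigns to unnamed struct members in its reference outputs, such as the two-member result struct of OpIAddCarry or the frexp/modf result structs seen above. As a rough illustration of where the extended-arithmetic.desktop.comp hits come from, the following is a minimal GLSL sketch, not the actual test source; the block name U and its exact layout are assumptions:

    #version 450

    layout(local_size_x = 1) in;

    // Assumed buffer layout; the hits above only show that members
    // u.a, u.b, and u.d exist in the real test shader.
    layout(std430, binding = 0) buffer U
    {
        uint a;
        uint b;
        uint c;   // receives the 32-bit sum
        uint d;   // receives the carry (0 or 1)
    } u;

    void main()
    {
        // OpIAddCarry in the resulting SPIR-V returns a struct whose
        // unnamed members SPIRV-Cross emits as _m0 (sum) and _m1 (carry),
        // matching "_25._m0 = uaddCarry(u.a, u.b, _25._m1);" above.
        u.c = uaddCarry(u.a, u.b, u.d);
    }

The shaders-msl hits show the same test after translation to Metal, where the carry member is instead reconstructed with select(uint(1), uint(0), _25._m0 >= max(u.a, u.b)), i.e. the carry is derived from comparing the wrapped sum against the operands rather than returned by a builtin.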