Searched refs: in3 (Results 1 – 25 of 54) sorted by relevance

/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct4x4_msa.c
18 v8i16 in0, in1, in2, in3, in4; in vp9_fwht4x4_msa() local
20 LD_SH4(input, src_stride, in0, in1, in2, in3); in vp9_fwht4x4_msa()
23 in3 -= in2; in vp9_fwht4x4_msa()
24 in4 = (in0 - in3) >> 1; in vp9_fwht4x4_msa()
27 in3 += in1; in vp9_fwht4x4_msa()
29 TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1); in vp9_fwht4x4_msa()
32 in1 -= in3; in vp9_fwht4x4_msa()
34 SUB2(in4, in2, in4, in3, in2, in3); in vp9_fwht4x4_msa()
35 in0 -= in3; in vp9_fwht4x4_msa()
38 SLLI_4V(in0, in1, in2, in3, 2); in vp9_fwht4x4_msa()
[all …]
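
This first group is libvpx's MSA-vectorized 4x4 forward Walsh-Hadamard transform. For readers without the MIPS intrinsics in their head, here is a hedged scalar model of the 4-point butterfly each vector lane computes, after libvpx's C reference vp9_fwht4x4_c (the names a..d are illustrative, not taken from the file):

#include <stdint.h>

/* One 1-D Walsh-Hadamard pass. The MSA version runs this across whole
 * rows at once, transposes (TRANSPOSE4x4_SH_SH), repeats it on the
 * columns, then scales the result by 4 (the SLLI_4V(..., 2) above). */
static void wht4_1d_sketch(int16_t *a, int16_t *b, int16_t *c, int16_t *d) {
  int16_t e;
  *a += *b;
  *d -= *c;            /* cf. "in3 -= in2" */
  e = (*a - *d) >> 1;  /* cf. "in4 = (in0 - in3) >> 1" */
  *b = e - *b;
  *c = e - *c;
  *a -= *c;
  *d += *b;
}
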
vp9_fdct8x8_msa.c
18 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vp9_fht8x8_msa() local
20 LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7); in vp9_fht8x8_msa()
21 SLLI_4V(in0, in1, in2, in3, 2); in vp9_fht8x8_msa()
26 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_fht8x8_msa()
27 in0, in1, in2, in3, in4, in5, in6, in7); in vp9_fht8x8_msa()
28 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_fht8x8_msa()
29 in0, in1, in2, in3, in4, in5, in6, in7); in vp9_fht8x8_msa()
30 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_fht8x8_msa()
31 in0, in1, in2, in3, in4, in5, in6, in7); in vp9_fht8x8_msa()
34 VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_fht8x8_msa()
[all …]
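
The 8x8 hybrid transform above shows the separable pattern that all the larger entries in this group share: load, pre-scale, 1-D transform, transpose, 1-D transform again. A hedged, transform-agnostic skeleton (transform_1d and transpose8x8 are placeholders for the VP9_FDCT8/VP9_ADST8 and TRANSPOSE8x8_SH_SH macros; the final transposed store is elided, as in the listing):

#include <stdint.h>

static void transpose8x8(int16_t m[8][8]) {
  for (int r = 0; r < 8; ++r)
    for (int c = r + 1; c < 8; ++c) {
      const int16_t t = m[r][c];
      m[r][c] = m[c][r];
      m[c][r] = t;
    }
}

/* 2-D transform = 1-D pass, transpose, 1-D pass on the new rows. */
static void fht8x8_sketch(int16_t m[8][8], void (*transform_1d)(int16_t *)) {
  for (int r = 0; r < 8; ++r) transform_1d(m[r]);  /* first dimension  */
  transpose8x8(m);
  for (int r = 0; r < 8; ++r) transform_1d(m[r]);  /* second dimension */
}
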
vp9_fdct_msa.h
18 #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
35 ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
38 in4, in3); \
63 ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
84 #define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) { \ argument
91 UNPCK_R_SH_SW(in3, in3_r_m); \
vp9_fdct16x16_msa.c
368 v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11; in postproc_fdct16x8_1d_row() local
371 LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7); in postproc_fdct16x8_1d_row()
374 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in postproc_fdct16x8_1d_row()
375 in0, in1, in2, in3, in4, in5, in6, in7); in postproc_fdct16x8_1d_row()
379 FDCT_POSTPROC_2V_NEG_H(in2, in3); in postproc_fdct16x8_1d_row()
386 BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in postproc_fdct16x8_1d_row()
397 in0, in1, in2, in3, in4, in5, in6, in7); in postproc_fdct16x8_1d_row()
398 TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, in postproc_fdct16x8_1d_row()
399 tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3); in postproc_fdct16x8_1d_row()
400 ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16); in postproc_fdct16x8_1d_row()
/external/libvpx/libvpx/vp9/common/mips/msa/
vp9_idct4x4_msa.c
18 v8i16 in0, in1, in2, in3; in vp9_iht4x4_16_add_msa() local
21 LD4x4_SH(input, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
22 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
27 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
29 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
30 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
34 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
36 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
37 VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
41 VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vp9_iht4x4_16_add_msa()
[all …]
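
The four branches excerpted above are VP9's hybrid 4x4 inverse transforms: tx_type selects an IDCT or an IADST for each dimension. A hedged sketch of that dispatch (enum names follow VP9; idct4_stub/iadst4_stub are placeholders for the VP9_IDCT4x4/VP9_IADST4x4 macros, and which half of each name maps to rows vs. columns is a libvpx convention detail left out here):

#include <stdint.h>

/* Placeholder 1-D transforms standing in for the real macros above. */
static void idct4_stub(const int16_t *in, int16_t *out) { (void)in; (void)out; }
static void iadst4_stub(const int16_t *in, int16_t *out) { (void)in; (void)out; }

typedef void (*tx1d_fn)(const int16_t *, int16_t *);
enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST };

/* One 1-D transform per dimension, indexed by tx_type. */
static const tx1d_fn iht4_dispatch[4][2] = {
  { idct4_stub,  idct4_stub  },  /* DCT_DCT   */
  { iadst4_stub, idct4_stub  },  /* ADST_DCT  */
  { idct4_stub,  iadst4_stub },  /* DCT_ADST  */
  { iadst4_stub, iadst4_stub },  /* ADST_ADST */
};
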
vp9_idct8x8_msa.c
18 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vp9_iht8x8_64_add_msa() local
21 LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
23 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_iht8x8_64_add_msa()
24 in0, in1, in2, in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
29 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_iht8x8_64_add_msa()
30 in0, in1, in2, in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
32 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_iht8x8_64_add_msa()
33 in0, in1, in2, in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
34 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in vp9_iht8x8_64_add_msa()
35 in0, in1, in2, in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
[all …]
/external/libvpx/libvpx/vp8/encoder/mips/msa/
dct_msa.c
14 #define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
18 ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
20 ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
71 v8i16 in0, in1, in2, in3; in vp8_short_fdct4x4_msa() local
78 LD_SH4(input, pitch / 2, in0, in1, in2, in3); in vp8_short_fdct4x4_msa()
79 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vp8_short_fdct4x4_msa()
81 BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); in vp8_short_fdct4x4_msa()
82 SLLI_4V(temp0, temp1, in1, in3, 3); in vp8_short_fdct4x4_msa()
86 temp0 = __msa_ilvr_h(in3, in1); in vp8_short_fdct4x4_msa()
94 PCKEV_H2_SH(out0, out0, out1, out1, in1, in3); in vp8_short_fdct4x4_msa()
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
idct4x4_msa.c
15 v8i16 in0, in1, in2, in3; in vpx_iwht4x4_16_add_msa() local
19 LD4x4_SH(input, in0, in2, in3, in1); in vpx_iwht4x4_16_add_msa()
20 TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1); in vpx_iwht4x4_16_add_msa()
23 UNPCK_R_SH_SW(in3, in3_r); in vpx_iwht4x4_16_add_msa()
46 in0, in1, in2, in3); in vpx_iwht4x4_16_add_msa()
47 ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride); in vpx_iwht4x4_16_add_msa()
72 v8i16 in0, in1, in2, in3; in vpx_idct4x4_16_add_msa() local
75 LD4x4_SH(input, in0, in1, in2, in3); in vpx_idct4x4_16_add_msa()
77 TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); in vpx_idct4x4_16_add_msa()
78 VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); in vpx_idct4x4_16_add_msa()
[all …]
idct8x8_msa.c
15 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vpx_idct8x8_64_add_msa() local
18 LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
21 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in vpx_idct8x8_64_add_msa()
22 in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
24 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in vpx_idct8x8_64_add_msa()
25 in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
27 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in vpx_idct8x8_64_add_msa()
28 in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
30 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in vpx_idct8x8_64_add_msa()
31 in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
[all …]
fwd_txfm_msa.c
16 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x16_1d_column() local
29 in0, in1, in2, in3, in4, in5, in6, in7, in fdct8x16_1d_column()
31 SLLI_4V(in0, in1, in2, in3, 2); in fdct8x16_1d_column()
35 ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3); in fdct8x16_1d_column()
40 SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12); in fdct8x16_1d_column()
135 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct16x8_1d_row() local
138 LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7); in fdct16x8_1d_row()
140 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in fdct16x8_1d_row()
141 in0, in1, in2, in3, in4, in5, in6, in7); in fdct16x8_1d_row()
144 ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3); in fdct16x8_1d_row()
[all …]
macros_msa.h
268 #define SW4(in0, in1, in2, in3, pdst, stride) { \ argument
272 SW(in3, (pdst) + 3 * stride); \
282 #define SD4(in0, in1, in2, in3, pdst, stride) { \ argument
286 SD(in3, (pdst) + 3 * stride); \
406 #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) { \ argument
408 ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
412 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
414 ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
430 #define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) { \ argument
432 ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
[all …]
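
The store macros here are truncated to their in3 lines. The likely full shape of SW4, inferred from the one surviving store (an assumption; only the last line of the body is visible above):

/* SW() stores a single 32-bit word; SW4 fans four words out at
 * consecutive stride offsets, ending in the visible
 * "SW(in3, (pdst) + 3 * stride)". */
#define SW4(in0, in1, in2, in3, pdst, stride) { \
  SW(in0, (pdst));                              \
  SW(in1, (pdst) + stride);                     \
  SW(in2, (pdst) + 2 * stride);                 \
  SW(in3, (pdst) + 3 * stride);                 \
}
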
fwd_dct32x32_msa.c
16 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x32_1d_column_load_butterfly() local
22 LD_SH4(input, src_stride, in0, in1, in2, in3); in fdct8x32_1d_column_load_butterfly()
26 SLLI_4V(in0, in1, in2, in3, 2); in fdct8x32_1d_column_load_butterfly()
30 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, in fdct8x32_1d_column_load_butterfly()
40 LD_SH4(input + (8 * src_stride), src_stride, in0, in1, in2, in3); in fdct8x32_1d_column_load_butterfly()
44 SLLI_4V(in0, in1, in2, in3, 2); in fdct8x32_1d_column_load_butterfly()
48 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, in fdct8x32_1d_column_load_butterfly()
59 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x32_1d_column_even_store() local
65 LD_SH4(input, 8, in0, in1, in2, in3); in fdct8x32_1d_column_even_store()
67 BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15, in fdct8x32_1d_column_even_store()
[all …]
inv_txfm_msa.h
18 #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
35 ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
38 in4, in3); \
64 ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
95 #define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) { \ argument
105 ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, \
112 #define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) { \ argument
124 step1_m = __msa_ilvr_h(in3, in1); \
135 #define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) { \ argument
147 ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \
[all …]
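
VP9_IDCT4x4 above computes, per vector lane, the standard 4-point inverse DCT. A hedged scalar model after vpx_dsp's idct4_c, using the usual 14-bit fixed-point cosines (cos(k*pi/64) * 2^14) and ignoring the 16-bit wrapping details:

#include <stdint.h>

/* Two rotations, then a butterfly; rounding is (x + 2^13) >> 14. */
static void idct4_sketch(const int16_t *in, int16_t *out) {
  const int cospi_8_64 = 15137, cospi_16_64 = 11585, cospi_24_64 = 6270;
  const int s0 = ((in[0] + in[2]) * cospi_16_64 + (1 << 13)) >> 14;
  const int s1 = ((in[0] - in[2]) * cospi_16_64 + (1 << 13)) >> 14;
  const int s2 = (in[1] * cospi_24_64 - in[3] * cospi_8_64 + (1 << 13)) >> 14;
  const int s3 = (in[1] * cospi_8_64 + in[3] * cospi_24_64 + (1 << 13)) >> 14;
  out[0] = (int16_t)(s0 + s3);
  out[1] = (int16_t)(s1 + s2);
  out[2] = (int16_t)(s1 - s2);
  out[3] = (int16_t)(s0 - s3);
}
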
fwd_txfm_msa.h
32 #define VP9_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) { \ argument
39 BUTTERFLY_4(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m); \
59 #define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) { \ argument
62 SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15); \
64 AVE_SH4_SH(vec0_m, in0, vec1_m, in1, vec2_m, in2, vec3_m, in3, \
65 in0, in1, in2, in3); \
70 #define VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
79 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \
130 #define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
138 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \
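
VP9_FDCT4 above is the forward counterpart: the BUTTERFLY_4 stage visible in the listing, then two fixed-point rotations. A hedged scalar model after vpx_fdct4x4_c's inner step (the full function's input pre-scaling and final rounding fixups are omitted):

#include <stdint.h>

static void fdct4_sketch(const int16_t *in, int16_t *out) {
  const int cospi_8_64 = 15137, cospi_16_64 = 11585, cospi_24_64 = 6270;
  const int s0 = in[0] + in[3], s1 = in[1] + in[2];  /* BUTTERFLY_4 sums  */
  const int s2 = in[1] - in[2], s3 = in[0] - in[3];  /* BUTTERFLY_4 diffs */
  out[0] = (int16_t)(((s0 + s1) * cospi_16_64 + (1 << 13)) >> 14);
  out[2] = (int16_t)(((s0 - s1) * cospi_16_64 + (1 << 13)) >> 14);
  out[1] = (int16_t)((s2 * cospi_24_64 + s3 * cospi_8_64 + (1 << 13)) >> 14);
  out[3] = (int16_t)((s3 * cospi_24_64 - s2 * cospi_8_64 + (1 << 13)) >> 14);
}
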
txfm_macros_msa.h
34 #define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
41 DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7, \
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h
256 #define SW4(in0, in1, in2, in3, pdst, stride) \ argument
261 SW(in3, (pdst) + 3 * stride); \
271 #define SD4(in0, in1, in2, in3, pdst, stride) \ argument
276 SD(in3, (pdst) + 3 * stride); \
370 #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \ argument
373 ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
378 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
381 ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
568 #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ argument
571 out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
[all …]
idct_msa.c
18 #define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
22 TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \
45 #define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
55 c_tmp2_m = __msa_mul_q_h(in3, const_cospi8sqrt2minus1_m); \
57 c_tmp2_m = in3 + c_tmp2_m; \
62 d_tmp2_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3); \
67 #define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \ argument
78 c_tmp2_m = in3 + ((in3 * const_cospi8sqrt2minus1_m) >> 16); \
81 d_tmp2_m = (in3 * sinpi8_sqrt2_m) >> 16; \
91 v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; in idct4x4_addblk_msa() local
[all …]
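
The "in3 + ((in3 * const_cospi8sqrt2minus1_m) >> 16)" pattern above is VP8's trick for a Q16 constant larger than one: sqrt(2)*cos(pi/8) ~ 1.30656 does not fit in Q16, so the code multiplies by the fractional part (20091 ~ 0.30656 * 2^16) and adds the input back; sqrt(2)*sin(pi/8) ~ 0.54120 fits directly (35468). Scalar model, with the constants from VP8's C reference:

/* VP8 IDCT fixed-point multiplies. */
static int mul_by_sqrt2_cospi8(int x) {
  return x + ((x * 20091) >> 16);  /* ~= x * 1.30656 */
}
static int mul_by_sqrt2_sinpi8(int x) {
  return (x * 35468) >> 16;        /* ~= x * 0.54120 */
}
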
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_sse2.c
50 __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); in vpx_fdct8x8_1_sse2() local
54 u1 = _mm_add_epi16(in2, in3); in vpx_fdct8x8_1_sse2()
59 in3 = _mm_load_si128((const __m128i *)(input + 7 * stride)); in vpx_fdct8x8_1_sse2()
64 in2 = _mm_add_epi16(in2, in3); in vpx_fdct8x8_1_sse2()
88 __m128i in0, in1, in2, in3; in vpx_fdct16x16_1_sse2() local
98 in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); in vpx_fdct16x16_1_sse2()
101 u1 = _mm_add_epi16(in2, in3); in vpx_fdct16x16_1_sse2()
107 in3 = _mm_load_si128((const __m128i *)(input + 7 * stride)); in vpx_fdct16x16_1_sse2()
111 u1 = _mm_add_epi16(in2, in3); in vpx_fdct16x16_1_sse2()
117 in3 = _mm_load_si128((const __m128i *)(input + 11 * stride)); in vpx_fdct16x16_1_sse2()
[all …]
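
The *_1 functions above are DC-only forward transforms: the chains of _mm_add_epi16 simply sum the whole block, since only output[0] is needed. A hedged scalar equivalent (libvpx downscales the sum by a block-size-dependent shift before storing it; that detail is left out here):

#include <stdint.h>

/* Sum an n x n residual block; the DC-only fdct writes a (possibly
 * right-shifted) copy of this to output[0] and zeroes output[1]. */
static int32_t block_sum(const int16_t *input, int stride, int n) {
  int32_t sum = 0;
  for (int r = 0; r < n; ++r)
    for (int c = 0; c < n; ++c) sum += input[r * stride + c];
  return sum;
}
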
fwd_txfm_impl_sse2.h
289 __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); in FDCT8x8_2D() local
298 in3 = _mm_slli_epi16(in3, 2); in FDCT8x8_2D()
315 const __m128i q3 = ADD_EPI16(in3, in4); in FDCT8x8_2D()
316 const __m128i q4 = SUB_EPI16(in3, in4); in FDCT8x8_2D()
521 in3 = _mm_unpackhi_epi64(tr1_2, tr1_6); in FDCT8x8_2D()
544 const __m128i sign_in3 = _mm_srai_epi16(in3, 15); in FDCT8x8_2D()
552 in3 = _mm_sub_epi16(in3, sign_in3); in FDCT8x8_2D()
560 in3 = _mm_srai_epi16(in3, 1); in FDCT8x8_2D()
569 store_output(&in3, (output + 3 * 8)); in FDCT8x8_2D()
/external/llvm/test/CodeGen/AMDGPU/
fma.f64.ll
12 double addrspace(1)* %in2, double addrspace(1)* %in3) {
15 %r2 = load double, double addrspace(1)* %in3
25 <2 x double> addrspace(1)* %in2, <2 x double> addrspace(1)* %in3) {
28 %r2 = load <2 x double>, <2 x double> addrspace(1)* %in3
40 <4 x double> addrspace(1)* %in2, <4 x double> addrspace(1)* %in3) {
43 %r2 = load <4 x double>, <4 x double> addrspace(1)* %in3
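
These AMDGPU CodeGen tests load their operands (%in2 and %in3 visible above) and check that llvm.fma.* lowers to fused multiply-add instructions. Per element the operation is exactly C's fma():

#include <math.h>

/* One lane of what fma.f64.ll exercises: a * b + c with a single
 * rounding step. */
static double fma_lane(double a, double b, double c) {
  return fma(a, b, c);
}
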
image-resource-id.ll
196 %opencl.image2d_t addrspace(1)* %in3, ; read_only
200 %opencl.image2d_t addrspace(1)* %in3) #0
213 %opencl.image3d_t addrspace(1)* %in3, ; read_only
217 %opencl.image3d_t addrspace(1)* %in3) #0
231 %opencl.image2d_t addrspace(1)* %in3, ; write_only
235 %opencl.image2d_t addrspace(1)* %in3) #0
248 %opencl.image3d_t addrspace(1)* %in3, ; write_only
252 %opencl.image3d_t addrspace(1)* %in3) #0
266 %opencl.image2d_t addrspace(1)* %in3, ; read_only
270 %opencl.image2d_t addrspace(1)* %in3) #0
[all …]
fma.ll
16 float addrspace(1)* %in2, float addrspace(1)* %in3) {
19 %r2 = load float, float addrspace(1)* %in3
33 <2 x float> addrspace(1)* %in2, <2 x float> addrspace(1)* %in3) {
36 %r2 = load <2 x float>, <2 x float> addrspace(1)* %in3
54 <4 x float> addrspace(1)* %in2, <4 x float> addrspace(1)* %in3) {
57 %r2 = load <4 x float>, <4 x float> addrspace(1)* %in3
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_dct_ssse3.c
53 __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); in vp9_fdct8x8_quant_ssse3() local
70 in3 = _mm_slli_epi16(in3, 2); in vp9_fdct8x8_quant_ssse3()
79 in[3] = &in3; in vp9_fdct8x8_quant_ssse3()
96 const __m128i q3 = _mm_add_epi16(in3, in4); in vp9_fdct8x8_quant_ssse3()
97 const __m128i q4 = _mm_sub_epi16(in3, in4); in vp9_fdct8x8_quant_ssse3()
245 in3 = _mm_unpackhi_epi64(tr1_2, tr1_6); in vp9_fdct8x8_quant_ssse3()
268 const __m128i sign_in3 = _mm_srai_epi16(in3, 15); in vp9_fdct8x8_quant_ssse3()
276 in3 = _mm_sub_epi16(in3, sign_in3); in vp9_fdct8x8_quant_ssse3()
284 in3 = _mm_srai_epi16(in3, 1); in vp9_fdct8x8_quant_ssse3()
/external/boringssl/src/crypto/aes/asm/
aesni-x86_64.pl
1160 my ($in0,$in1,$in2,$in3,$in4,$in5)=map("%xmm$_",(10..15));
1337 movdqu 0x50($inp),$in3
1350 pxor $inout5,$in3
1357 movdqu $in3,0x50($out)
1479 movdqu 0x30($inp),$in3
1482 pxor $rndkey0,$in3
1504 aesenclast $in3,$inout3
1506 movdqa 0x20(%rsp),$in3
1521 movdqa $in3,$inout2
1569 movdqu 0x30($inp),$in3
[all …]
/external/deqp/external/vulkancts/data/vulkan/glsl/es310/
conversions.test
7987 input float in3 = [ 3.5 | 36.8125 | 1.0 | -8.25 | 2.0 | 0.0 | -0.5 | -20.125 ];
8001 out0 = vec4(in0, in1, in2, in3);
8014 input float in3 = [ 3.5 | 36.8125 | 1.0 | -8.25 | 2.0 | 0.0 | -0.5 | -20.125 ];
8028 out0 = ivec4(in0, in1, in2, in3);
8041 input float in3 = [ 3.5 | 36.8125 | 1.0 | -8.25 | 2.0 | 0.0 | -0.5 | -20.125 ];
8055 out0 = bvec4(in0, in1, in2, in3);
8068 input int in3 = [ -192 | -66 | 8 | -12 | 1 | 2 | 0 | 255 | 5 | 11 ];
8082 out0 = vec4(in0, in1, in2, in3);
8095 input int in3 = [ -192 | -66 | 8 | -12 | 1 | 2 | 0 | 255 | 5 | 11 ];
8109 out0 = ivec4(in0, in1, in2, in3);
[all …]
