/external/libvpx/libvpx/vp9/encoder/mips/msa/ |
D | vp9_fdct4x4_msa.c |
    19  v8i16 in0, in1, in2, in3, in4;  in vp9_fwht4x4_msa() local
    21  LD_SH4(input, src_stride, in0, in1, in2, in3);  in vp9_fwht4x4_msa()
    24  in3 -= in2;  in vp9_fwht4x4_msa()
    25  in4 = (in0 - in3) >> 1;  in vp9_fwht4x4_msa()
    28  in3 += in1;  in vp9_fwht4x4_msa()
    30  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in vp9_fwht4x4_msa()
    33  in1 -= in3;  in vp9_fwht4x4_msa()
    35  SUB2(in4, in2, in4, in3, in2, in3);  in vp9_fwht4x4_msa()
    36  in0 -= in3;  in vp9_fwht4x4_msa()
    39  SLLI_4V(in0, in1, in2, in3, 2);  in vp9_fwht4x4_msa()
    [all …]
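
The excerpt above is the MSA-vectorised forward Walsh-Hadamard transform: each v8i16 register carries a whole row, so one butterfly step updates several lanes at once. As a reading aid, here is a scalar sketch in C of one 1-D pass of that butterfly; variable names mirror the excerpt, the steps not visible in it are marked as assumptions, and this is not the libvpx C reference implementation.

    #include <stdint.h>

    /* One 1-D pass of the 4-point Walsh-Hadamard butterfly in scalar form.
     * The MSA kernel above applies the same adds/subtracts to v8i16
     * vectors, transposes between passes, and scales with SLLI_4V(..., 2). */
    static void fwht4_1d_sketch(int16_t v[4]) {
      int16_t in0 = v[0], in1 = v[1], in2 = v[2], in3 = v[3], in4;

      in0 = (int16_t)(in0 + in1);        /* assumed: not shown in the excerpt */
      in3 = (int16_t)(in3 - in2);        /* excerpt line 24: in3 -= in2       */
      in4 = (int16_t)((in0 - in3) >> 1); /* excerpt line 25                   */
      in1 = (int16_t)(in4 - in1);        /* assumed, SUB2()-style step        */
      in2 = (int16_t)(in4 - in2);        /* assumed, SUB2()-style step        */
      in0 = (int16_t)(in0 - in2);        /* assumed                           */
      in3 = (int16_t)(in3 + in1);        /* excerpt line 28: in3 += in1       */

      /* Output order follows TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, ...). */
      v[0] = in0; v[1] = in2; v[2] = in3; v[3] = in1;
    }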
|
D | vp9_fdct8x8_msa.c |
    19  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_fht8x8_msa() local
    21  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
    22  SLLI_4V(in0, in1, in2, in3, 2);  in vp9_fht8x8_msa()
    27  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    29  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_fht8x8_msa()
    30  in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
    31  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    35  VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    37  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_fht8x8_msa()
    38  in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
    [all …]
|
/external/libvpx/libvpx/vp9/common/mips/msa/ |
D | vp9_idct4x4_msa.c |
    19  v8i16 in0, in1, in2, in3;  in vp9_iht4x4_16_add_msa() local
    22  LD4x4_SH(input, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    23  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    28  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    30  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    31  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    35  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    37  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    38  VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    42  VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    [all …]
|
D | vp9_idct8x8_msa.c |
    19  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_iht8x8_64_add_msa() local
    22  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    24  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    30  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    33  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_iht8x8_64_add_msa()
    34  in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    35  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    40  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    43  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_iht8x8_64_add_msa()
    44  in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    [all …]
|
/external/libaom/libaom/av1/encoder/mips/msa/ |
D | fdct4x4_msa.c |
    18  v8i16 in0, in1, in2, in3, in4;  in av1_fwht4x4_msa() local
    20  LD_SH4(input, src_stride, in0, in1, in2, in3);  in av1_fwht4x4_msa()
    23  in3 -= in2;  in av1_fwht4x4_msa()
    24  in4 = (in0 - in3) >> 1;  in av1_fwht4x4_msa()
    27  in3 += in1;  in av1_fwht4x4_msa()
    29  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in av1_fwht4x4_msa()
    32  in1 -= in3;  in av1_fwht4x4_msa()
    34  SUB2(in4, in2, in4, in3, in2, in3);  in av1_fwht4x4_msa()
    35  in0 -= in3;  in av1_fwht4x4_msa()
    38  SLLI_4V(in0, in1, in2, in3, 2);  in av1_fwht4x4_msa()
    [all …]
|
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | dct_msa.c |
    14  #define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \  argument
    18  ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
    20  ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
    70  v8i16 in0, in1, in2, in3;  in vp8_short_fdct4x4_msa() local
    77  LD_SH4(input, pitch / 2, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
    78  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
    80  BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);  in vp8_short_fdct4x4_msa()
    81  SLLI_4V(temp0, temp1, in1, in3, 3);  in vp8_short_fdct4x4_msa()
    85  temp0 = __msa_ilvr_h(in3, in1);  in vp8_short_fdct4x4_msa()
    93  PCKEV_H2_SH(out0, out0, out1, out1, in1, in3);  in vp8_short_fdct4x4_msa()
    [all …]
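
TRANSPOSE4x4_H above assembles the transpose from ILVR_H2_SH/ILVL_H2_SH even/odd interleaves. In scalar terms the result is simply a 4x4 transpose of 16-bit values; the sketch below is for orientation only and is not the macro's actual expansion.

    #include <stdint.h>

    /* Plain-C equivalent of a 4x4 transpose of int16 elements; the MSA
     * macro reaches the same layout with interleave instructions. */
    static void transpose4x4_h_sketch(const int16_t in[4][4], int16_t out[4][4]) {
      for (int r = 0; r < 4; ++r) {
        for (int c = 0; c < 4; ++c) {
          out[c][r] = in[r][c];
        }
      }
    }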
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | idct4x4_msa.c |
    16  v8i16 in0, in1, in2, in3;  in vpx_iwht4x4_16_add_msa() local
    20  LD4x4_SH(input, in0, in2, in3, in1);  in vpx_iwht4x4_16_add_msa()
    21  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in vpx_iwht4x4_16_add_msa()
    24  UNPCK_R_SH_SW(in3, in3_r);  in vpx_iwht4x4_16_add_msa()
    47  in2, in3);  in vpx_iwht4x4_16_add_msa()
    48  ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);  in vpx_iwht4x4_16_add_msa()
    73  v8i16 in0, in1, in2, in3;  in vpx_idct4x4_16_add_msa() local
    76  LD4x4_SH(input, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
    78  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
    79  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
    [all …]
|
D | idct8x8_msa.c |
    16  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_idct8x8_64_add_msa() local
    19  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
    22  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    25  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    28  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    31  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    34  SRARI_H4_SH(in0, in1, in2, in3, 5);  in vpx_idct8x8_64_add_msa()
    37  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);  in vpx_idct8x8_64_add_msa()
    44  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_idct8x8_12_add_msa() local
    50  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_12_add_msa()
    [all …]
|
D | fwd_txfm_msa.c |
    15  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_fdct8x8_1_msa() local
    18  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_fdct8x8_1_msa()
    19  ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);  in vpx_fdct8x8_1_msa()
    31  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct8x16_1d_column() local
    44  LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,  in fdct8x16_1d_column()
    46  SLLI_4V(in0, in1, in2, in3, 2);  in fdct8x16_1d_column()
    50  ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);  in fdct8x16_1d_column()
    55  SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);  in fdct8x16_1d_column()
    150  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct16x8_1d_row() local
    153  LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7);  in fdct16x8_1d_row()
    [all …]
|
D | macros_msa.h |
    205  #define SW4(in0, in1, in2, in3, pdst, stride) \  argument
    210  SW(in3, (pdst) + 3 * stride); \
    220  #define SD4(in0, in1, in2, in3, pdst, stride) \  argument
    225  SD(in3, (pdst) + 3 * stride); \
    323  #define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    326  ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    331  #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    333  ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    481  #define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \  argument
    484  out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
    [all …]
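
The store macros above (SW4, SD4, ST_V4, ST_V8, ...) fan a group of values out to pdst at successive multiples of stride. Below is a plain-C sketch of what SW4() amounts to; SW() itself is the single 32-bit store macro defined earlier in macros_msa.h and is approximated here with memcpy.

    #include <stdint.h>
    #include <string.h>

    /* Sketch of SW4(in0, in1, in2, in3, pdst, stride): four 32-bit stores
     * at pdst + 0/1/2/3 * stride.  memcpy stands in for the unaligned-safe
     * SW() store used by the real macro. */
    static void sw4_sketch(uint32_t in0, uint32_t in1, uint32_t in2,
                           uint32_t in3, uint8_t *pdst, int32_t stride) {
      memcpy(pdst + 0 * stride, &in0, sizeof(uint32_t));
      memcpy(pdst + 1 * stride, &in1, sizeof(uint32_t));
      memcpy(pdst + 2 * stride, &in2, sizeof(uint32_t));
      memcpy(pdst + 3 * stride, &in3, sizeof(uint32_t));
    }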
|
/external/libaom/libaom/aom_dsp/mips/ |
D | macros_msa.h |
    265  #define SW4(in0, in1, in2, in3, pdst, stride) \  argument
    270  SW(in3, (pdst) + 3 * stride); \
    280  #define SD4(in0, in1, in2, in3, pdst, stride) \  argument
    285  SD(in3, (pdst) + 3 * stride); \
    416  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    419  ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    423  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    425  ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    442  #define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    445  ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    [all …]
|
/external/deqp/external/vulkancts/modules/vulkan/spirv_assembly/ |
D | vktSpvAsmTrinaryMinMaxTests.cpp |
    196  static inline void runOpFunc (F f, void* out, const void* in1, const void* in2, const void* in3)  in runOpFunc() argument
    198  …interpret_cast<const T*>(in1), *reinterpret_cast<const T*>(in2), *reinterpret_cast<const T*>(in3));  in runOpFunc()
    202  …const void* in1, const void* in2, const void* in3) { runOpFunc<deInt8> (min3<deInt8>, out, in1,…  in minInt8() argument
    203  …const void* in1, const void* in2, const void* in3) { runOpFunc<deInt8> (max3<deInt8>, out, in1,…  in maxInt8() argument
    204  …const void* in1, const void* in2, const void* in3) { runOpFunc<deInt8> (mid3<deInt8>, out, in1,…  in midInt8() argument
    205  …onst void* in1, const void* in2, const void* in3) { runOpFunc<deUint8> (min3<deUint8>, out, in1…  in minUint8() argument
    206  …onst void* in1, const void* in2, const void* in3) { runOpFunc<deUint8> (max3<deUint8>, out, in1…  in maxUint8() argument
    207  …onst void* in1, const void* in2, const void* in3) { runOpFunc<deUint8> (mid3<deUint8>, out, in1…  in midUint8() argument
    208  …onst void* in1, const void* in2, const void* in3) { runOpFunc<deInt16> (min3<deInt16>, out, in1…  in minInt16() argument
    209  …onst void* in1, const void* in2, const void* in3) { runOpFunc<deInt16> (max3<deInt16>, out, in1…  in maxInt16() argument
    [all …]
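
The wrappers above bind min3/max3/mid3 templates to the void-pointer interface driven by the test harness. For orientation, the trinary operations themselves reduce to the following C sketch; the dEQP code uses C++ templates typed on deInt8/deUint8/deInt16 and so on.

    /* Trinary minimum, maximum and middle value of three scalars, as
     * exercised by the SPIR-V trinary min/max tests above. */
    static int min3_sketch(int a, int b, int c) {
      const int m = (a < b) ? a : b;
      return (m < c) ? m : c;
    }

    static int max3_sketch(int a, int b, int c) {
      const int m = (a > b) ? a : b;
      return (m > c) ? m : c;
    }

    static int mid3_sketch(int a, int b, int c) {
      /* The middle value is c clamped to [min(a, b), max(a, b)]. */
      const int lo = (a < b) ? a : b;
      const int hi = (a < b) ? b : a;
      const int clamped = (c < lo) ? lo : c;
      return (clamped > hi) ? hi : clamped;
    }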
|
/external/webp/src/dsp/ |
D | msa_macro.h |
    168  #define SW4(in0, in1, in2, in3, pdst, stride) do { \  argument
    176  SW(in3, ptmp); \
    202  #define SD4(in0, in1, in2, in3, pdst, stride) do { \  argument
    210  SD(in3, ptmp); \
    303  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \  argument
    305  ST_B2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
    310  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
    312  ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    338  #define ST_W4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \  argument
    340  ST_W2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
    [all …]
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | vp8_macros_msa.h |
    238  #define SW4(in0, in1, in2, in3, pdst, stride) \  argument
    243  SW(in3, (pdst) + 3 * stride); \
    253  #define SD4(in0, in1, in2, in3, pdst, stride) \  argument
    258  SD(in3, (pdst) + 3 * stride); \
    352  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    355  ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    360  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    362  ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    549  #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \  argument
    552  out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | fwd_txfm_sse2.c |
    51  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));  in vpx_fdct8x8_1_sse2() local
    55  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct8x8_1_sse2()
    60  in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));  in vpx_fdct8x8_1_sse2()
    65  in2 = _mm_add_epi16(in2, in3);  in vpx_fdct8x8_1_sse2()
    89  __m128i in0, in1, in2, in3;  in vpx_fdct16x16_1_sse2() local
    98  in3 = _mm_load_si128((const __m128i *)(input + 1 * stride + 8));  in vpx_fdct16x16_1_sse2()
    101  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct16x16_1_sse2()
    107  in3 = _mm_load_si128((const __m128i *)(input + 3 * stride + 8));  in vpx_fdct16x16_1_sse2()
    111  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct16x16_1_sse2()
    117  in3 = _mm_load_si128((const __m128i *)(input + 5 * stride + 8));  in vpx_fdct16x16_1_sse2()
    [all …]
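
The *_1 kernels above only produce the DC coefficient, so the loads and _mm_add_epi16 adds are just summing the block. Below is a scalar sketch of that accumulation; the real kernels fold whole rows at a time and may scale the sum before storing it as output[0].

    #include <stdint.h>

    /* Sum of all samples of an 8x8 block: the quantity vpx_fdct8x8_1_sse2()
     * accumulates with vector adds before writing the DC coefficient. */
    static int32_t block8x8_sum_sketch(const int16_t *input, int stride) {
      int32_t sum = 0;
      for (int r = 0; r < 8; ++r) {
        for (int c = 0; c < 8; ++c) {
          sum += input[r * stride + c];
        }
      }
      return sum;
    }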
|
/external/tensorflow/tensorflow/core/kernels/ |
D | aggregate_ops_gpu.cu.cc |
    47  typename TTypes<T>::ConstFlat in3) {  in operator ()()
    48  Add3EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3);  in operator ()()
    57  typename TTypes<T>::ConstFlat in3,  in operator ()()
    59  Add4EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4);  in operator ()()
    68  typename TTypes<T>::ConstFlat in3,  in operator ()()
    71  Add5EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);  in operator ()()
    80  typename TTypes<T>::ConstFlat in3,  in operator ()()
    84  Add6EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);  in operator ()()
    93  typename TTypes<T>::ConstFlat in3,  in operator ()()
    98  Add7EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,  in operator ()()
    [all …]
|
D | aggregate_ops_cpu.h |
    45  typename TTypes<T>::ConstFlat in3) {
    46  Add3EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3);
    54  typename TTypes<T>::ConstFlat in3,
    56  Add4EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4);
    64  typename TTypes<T>::ConstFlat in3,
    67  Add5EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);
    75  typename TTypes<T>::ConstFlat in3,
    79  Add6EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);
    87  typename TTypes<T>::ConstFlat in3,
    92  Add7EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
    [all …]
|
D | aggregate_ops.h |
    49  typename TTypes<T>::ConstFlat in3);
    57  typename TTypes<T>::ConstFlat in3) {  in Compute()
    58  out.device(d) = in1 + in2 + in3;  in Compute()
    67  typename TTypes<T>::ConstFlat in3,
    76  typename TTypes<T>::ConstFlat in3,  in Compute()
    78  out.device(d) = in1 + in2 + in3 + in4;  in Compute()
    87  typename TTypes<T>::ConstFlat in3,
    97  typename TTypes<T>::ConstFlat in3,  in Compute()
    100  out.device(d) = in1 + in2 + in3 + in4 + in5;  in Compute()
    109  typename TTypes<T>::ConstFlat in3,
    [all …]
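
The Add3..Add7 functors above are one-line Eigen expressions (out.device(d) = in1 + in2 + in3 + ...). In plain C the same computation is an elementwise sum over flat buffers; the sketch below illustrates the three-input case, while the TensorFlow code is templated C++ dispatched to the chosen device.

    #include <stddef.h>

    /* Elementwise three-way sum over flat buffers, mirroring the
     * out.device(d) = in1 + in2 + in3 line in the Compute() excerpt above. */
    static void add3_sketch(float *out, const float *in1, const float *in2,
                            const float *in3, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        out[i] = in1[i] + in2[i] + in3[i];
      }
    }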
|
/external/python/cpython2/Modules/_ctypes/libffi/testsuite/libffi.call/ |
D | return_dbl2.c |
    10  static double return_dbl(double dbl1, double dbl2, unsigned int in3, double dbl4)  in return_dbl() argument
    12  return dbl1 + dbl2 + in3 + dbl4;  in return_dbl()
    20  unsigned int in3;  in main() local
    27  values[2] = &in3;  in main()
    35  in3 = 255;  in main()
    39  printf ("%f vs %f\n", rdbl, return_dbl(dbl1, dbl2, in3, dbl4));  in main()
    40  CHECK(rdbl == dbl1 + dbl2 + in3 + dbl4);  in main()
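
The test above routes in3 to return_dbl() through libffi: values[2] = &in3 supplies the unsigned int argument, and CHECK compares the ffi_call result with a direct call. Below is a self-contained sketch of that flow; the argument types follow the excerpt, but the concrete dbl1/dbl2/dbl4 constants of the real test are not shown above, so the values used here are placeholders.

    #include <ffi.h>
    #include <stdio.h>

    static double return_dbl(double dbl1, double dbl2, unsigned int in3,
                             double dbl4) {
      return dbl1 + dbl2 + in3 + dbl4;
    }

    int main(void) {
      ffi_cif cif;
      ffi_type *arg_types[4] = { &ffi_type_double, &ffi_type_double,
                                 &ffi_type_uint, &ffi_type_double };
      double dbl1 = 1.0, dbl2 = 2.0, dbl4 = 4.0; /* placeholder values */
      unsigned int in3 = 255;                    /* as in the excerpt  */
      void *values[4] = { &dbl1, &dbl2, &in3, &dbl4 };
      double rdbl = 0.0;

      /* Describe double(double, double, unsigned int, double), then call. */
      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &ffi_type_double,
                       arg_types) != FFI_OK)
        return 1;
      ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values);

      printf("%f vs %f\n", rdbl, return_dbl(dbl1, dbl2, in3, dbl4));
      return (rdbl == dbl1 + dbl2 + in3 + dbl4) ? 0 : 1;
    }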
|
D | return_dbl1.c |
    10  static double return_dbl(double dbl1, float fl2, unsigned int in3, double dbl4)  in return_dbl() argument
    12  return dbl1 + fl2 + in3 + dbl4;  in return_dbl()
    21  unsigned int in3;  in main() local
    28  values[2] = &in3;  in main()
    36  in3 = 255;  in main()
    40  printf ("%f vs %f\n", rdbl, return_dbl(dbl1, fl2, in3, dbl4));  in main()
    41  CHECK(rdbl == dbl1 + fl2 + in3 + dbl4);  in main()
|
D | return_fl3.c |
    10  static float return_fl(float fl1, float fl2, unsigned int in3, float fl4)  in return_fl() argument
    12  return fl1 + fl2 + in3 + fl4;  in return_fl()
    20  unsigned int in3;  in main() local
    27  values[2] = &in3;  in main()
    35  in3 = 255;  in main()
    39  printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, in3, fl4));  in main()
    40  CHECK(rfl == fl1 + fl2 + in3 + fl4);  in main()
|
/external/libffi/testsuite/libffi.call/ |
D | return_fl3.c |
    10  static float return_fl(float fl1, float fl2, unsigned int in3, float fl4)  in return_fl() argument
    12  return fl1 + fl2 + in3 + fl4;  in return_fl()
    20  unsigned int in3;  in main() local
    27  values[2] = &in3;  in main()
    35  in3 = 255;  in main()
    39  printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, in3, fl4));  in main()
    40  CHECK(rfl == fl1 + fl2 + in3 + fl4);  in main()
|
D | return_dbl2.c |
    10  static double return_dbl(double dbl1, double dbl2, unsigned int in3, double dbl4)  in return_dbl() argument
    12  return dbl1 + dbl2 + in3 + dbl4;  in return_dbl()
    20  unsigned int in3;  in main() local
    27  values[2] = &in3;  in main()
    35  in3 = 255;  in main()
    39  printf ("%f vs %f\n", rdbl, return_dbl(dbl1, dbl2, in3, dbl4));  in main()
    40  CHECK(rdbl == dbl1 + dbl2 + in3 + dbl4);  in main()
|
D | return_dbl1.c |
    10  static double return_dbl(double dbl1, float fl2, unsigned int in3, double dbl4)  in return_dbl() argument
    12  return dbl1 + fl2 + in3 + dbl4;  in return_dbl()
    21  unsigned int in3;  in main() local
    28  values[2] = &in3;  in main()
    36  in3 = 255;  in main()
    40  printf ("%f vs %f\n", rdbl, return_dbl(dbl1, fl2, in3, dbl4));  in main()
    41  CHECK(rdbl == dbl1 + fl2 + in3 + dbl4);  in main()
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | fma.f64.ll |
    12  double addrspace(1)* %in2, double addrspace(1)* %in3) {
    15  %r2 = load double, double addrspace(1)* %in3
    25  <2 x double> addrspace(1)* %in2, <2 x double> addrspace(1)* %in3) {
    28  %r2 = load <2 x double>, <2 x double> addrspace(1)* %in3
    40  <4 x double> addrspace(1)* %in2, <4 x double> addrspace(1)* %in3) {
    43  %r2 = load <4 x double>, <4 x double> addrspace(1)* %in3
    52  double addrspace(1)* %in2, double addrspace(1)* %in3) {
    55  %r2 = load double, double addrspace(1)* %in3
    65  double addrspace(1)* %in2, double addrspace(1)* %in3) {
    68  %r2 = load double, double addrspace(1)* %in3
    [all …]
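
Each test above loads three double (or double-vector) operands from %in1/%in2/%in3 and forms a fused multiply-add that the AMDGPU backend is expected to select to an FMA instruction. The C-level counterpart is sketched below for orientation; the .ll file itself calls the llvm.fma.* intrinsics directly.

    #include <math.h>

    /* a * b + c with a single rounding step: the operation the fma.f64
     * CodeGen tests above exercise on scalar and vector doubles. */
    static double fma_sketch(double a, double b, double c) {
      return fma(a, b, c);
    }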
|