/external/libvpx/libvpx/vp9/encoder/mips/msa/ |
D | vp9_fdct4x4_msa.c |
    19  v8i16 in0, in1, in2, in3, in4;  in vp9_fwht4x4_msa() local
    21  LD_SH4(input, src_stride, in0, in1, in2, in3);  in vp9_fwht4x4_msa()
    24  in3 -= in2;  in vp9_fwht4x4_msa()
    26  SUB2(in4, in1, in4, in2, in1, in2);  in vp9_fwht4x4_msa()
    27  in0 -= in2;  in vp9_fwht4x4_msa()
    30  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in vp9_fwht4x4_msa()
    32  in0 += in2;  in vp9_fwht4x4_msa()
    35  SUB2(in4, in2, in4, in3, in2, in3);  in vp9_fwht4x4_msa()
    37  in1 += in2;  in vp9_fwht4x4_msa()
    39  SLLI_4V(in0, in1, in2, in3, 2);  in vp9_fwht4x4_msa()
    [all …]
|
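The entry above is VP9's 4x4 Walsh-Hadamard forward transform vectorized with MSA. As orientation only, here is a scalar sketch of the 1-D lifting pass whose steps are visible in the snippet; the two steps hidden behind "[all …]" are assumptions, and the helper name is ours, not libvpx's.

```cpp
#include <cstdint>

// One 1-D pass of the 4x4 Walsh-Hadamard lifting scheme; vp9_fwht4x4_msa()
// applies the same steps to whole v8i16 vectors. Steps marked "assumed" are
// hidden behind "[all …]" in the snippet above.
static void fwht4_1d_sketch(int16_t v[4]) {
  int a = v[0], b = v[1], c = v[2], d = v[3];
  a += b;                      // assumed: in0 += in1
  d -= c;                      // in3 -= in2
  const int e = (a - d) >> 1;  // assumed: in4 = (in0 - in3) >> 1
  b = e - b;                   // SUB2(in4, in1, in4, in2, in1, in2)
  c = e - c;
  a -= c;                      // in0 -= in2
  d += b;                      // assumed: in3 += in1
  // Output order mirrors the TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, ...) call.
  v[0] = (int16_t)a;
  v[1] = (int16_t)c;
  v[2] = (int16_t)d;
  v[3] = (int16_t)b;
}
```

The SLLI_4V(in0, in1, in2, in3, 2) visible at the end of the snippet is the final scaling of all four output rows by a left shift of two.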
D | vp9_fdct8x8_msa.c |
    19  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_fht8x8_msa() local
    21  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
    22  SLLI_4V(in0, in1, in2, in3, 2);  in vp9_fht8x8_msa()
    27  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    29  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_fht8x8_msa()
    31  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    35  VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    37  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_fht8x8_msa()
    39  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    43  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    [all …]
|
/external/libvpx/libvpx/vp9/common/mips/msa/ |
D | vp9_idct4x4_msa.c |
    19  v8i16 in0, in1, in2, in3;  in vp9_iht4x4_16_add_msa() local
    22  LD4x4_SH(input, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    23  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    28  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    30  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    31  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    35  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    37  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    38  VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    42  VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
    [all …]
|
D | vp9_idct8x8_msa.c |
    19  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_iht8x8_64_add_msa() local
    22  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    24  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    30  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    33  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_iht8x8_64_add_msa()
    35  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    40  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    43  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_iht8x8_64_add_msa()
    45  VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_iht8x8_64_add_msa()
    50  VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_iht8x8_64_add_msa()
    [all …]
|
/external/llvm-project/pstl/test/std/algorithms/alg.nonmodifying/ |
D | mismatch.pass.cpp |
    63  Sequence<T> in2(in);  in test_mismatch_by_type() local
    64  invoke_on_all_policies(test_mismatch(), in.begin(), in.end(), in2.begin(), in2.end());  in test_mismatch_by_type()
    65  invoke_on_all_policies(test_mismatch(), in.begin(), in.end(), in2.begin());  in test_mismatch_by_type()
    72  … invoke_on_all_policies(test_mismatch(), in.begin(), in.end(), in2.begin(), in2.end());  in test_mismatch_by_type()
    73  invoke_on_all_policies(test_mismatch(), in.begin(), in.end(), in2.begin());  in test_mismatch_by_type()
    79  in2[size / idx_for_2] = val;  in test_mismatch_by_type()
    80  … invoke_on_all_policies(test_mismatch(), in.cbegin(), in.cend(), in2.cbegin(), in2.cend());  in test_mismatch_by_type()
    81  invoke_on_all_policies(test_mismatch(), in.cbegin(), in.cend(), in2.cbegin());  in test_mismatch_by_type()
    85  Sequence<T> in2(100, [](size_t v) -> T { return T(v); });  in test_mismatch_by_type() local
    86  invoke_on_all_policies(test_mismatch(), in2.begin(), in2.end(), in.begin(), in.end());  in test_mismatch_by_type()
    [all …]
|
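These tests drive std::mismatch over every pstl execution policy via invoke_on_all_policies, exercising both the bounded and the unbounded second-range overloads. A minimal standalone illustration of the same two overloads follows; the sizes and the mismatch position are invented for the example and do not come from the test.

```cpp
#include <algorithm>
#include <cassert>
#include <execution>
#include <vector>

int main() {
  std::vector<int> in(100, 7);
  std::vector<int> in2(in);
  in2[42] = 8;  // single difference, analogous to "in2[size / idx_for_2] = val" above

  // Four-iterator overload: both ranges are bounded.
  auto p1 = std::mismatch(std::execution::par, in.begin(), in.end(),
                          in2.begin(), in2.end());
  // Three-iterator overload: the second range's end is implied.
  auto p2 = std::mismatch(std::execution::par, in.begin(), in.end(), in2.begin());

  assert(p1.first - in.begin() == 42);
  assert(p2.first - in.begin() == 42);
  return 0;
}
```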
D | find_first_of.pass.cpp |
    62  Sequence<T> in2(max_n2, [](std::size_t) { return T(0); });  in test() local
    68  …ke_on_all_policies(test_one_policy(), in1.begin(), in1.begin() + n1, in2.data(), in2.data() + n2, …  in test()
    70  in2[n2 / 2] = T(1);  in test()
    71  …ke_on_all_policies(test_one_policy(), in1.cbegin(), in1.cbegin() + n1, in2.data(), in2.data() + n2,  in test()
    76  in2[2 * n2 / 3] = T(1);  in test()
    77  … invoke_on_all_policies(test_one_policy(), in1.cbegin(), in1.cbegin() + n1, in2.begin(),  in test()
    78  in2.begin() + n2, pred);  in test()
    79  in2[2 * n2 / 3] = T(0);  in test()
    81  in2[n2 / 2] = T(0);  in test()
|
/external/libaom/libaom/av1/encoder/mips/msa/ |
D | fdct4x4_msa.c |
    18  v8i16 in0, in1, in2, in3, in4;  in av1_fwht4x4_msa() local
    20  LD_SH4(input, src_stride, in0, in1, in2, in3);  in av1_fwht4x4_msa()
    23  in3 -= in2;  in av1_fwht4x4_msa()
    25  SUB2(in4, in1, in4, in2, in1, in2);  in av1_fwht4x4_msa()
    26  in0 -= in2;  in av1_fwht4x4_msa()
    29  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in av1_fwht4x4_msa()
    31  in0 += in2;  in av1_fwht4x4_msa()
    34  SUB2(in4, in2, in4, in3, in2, in3);  in av1_fwht4x4_msa()
    36  in1 += in2;  in av1_fwht4x4_msa()
    38  SLLI_4V(in0, in1, in2, in3, 2);  in av1_fwht4x4_msa()
    [all …]
|
/external/llvm-project/pstl/test/std/algorithms/alg.sorting/alg.lex.comparison/ |
D | lexicographical_compare.pass.cpp |
    52  Sequence<T2> in2(2 * max_n, [](std::size_t k) { return T2(k); });  in test() local
    57  …invoke_on_all_policies(test_one_policy(), in1.cbegin(), in1.cbegin() + max_n, in2.cbegin() + 3 * m…  in test()
    58  in2.cbegin() + 5 * max_n / 10);  in test()
    62  …invoke_on_all_policies(test_one_policy(), in1.begin(), in1.begin() + max_n, in2.cbegin(), in2.cbeg…  in test()
    64  … invoke_on_all_policies(test_one_policy(), in1.begin(), in1.begin() + max_n, in2.begin() + max_n2,  in test()
    65  in2.begin() + 3 * max_n2, pred);  in test()
    69  …invoke_on_all_policies(test_one_policy(), in1.cbegin(), in1.cbegin() + max_n, in2.begin(), in2.beg…  in test()
    76  …invoke_on_all_policies(test_one_policy(), in1.begin(), in1.begin() + n1, in2.begin(), in2.begin() …  in test()
    81  in2[ind] = T2(-1);  in test()
    82  …invoke_on_all_policies(test_one_policy(), in1.begin(), in1.begin() + n1, in2.begin(), in2.begin() …  in test()
    [all …]
|
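The test above compares shifted sub-ranges of two sequences under every policy, with and without a custom predicate. The standard call it exercises has the same shape as this small self-contained illustration (the values and ranges here are invented):

```cpp
#include <algorithm>
#include <cassert>
#include <execution>
#include <vector>

int main() {
  std::vector<int> in1{1, 2, 3, 4};
  std::vector<int> in2{1, 2, 3, 5};  // differs only in the last element

  const bool less = std::lexicographical_compare(std::execution::par,
                                                 in1.begin(), in1.end(),
                                                 in2.begin(), in2.end());
  assert(less);  // in1 < in2 because 4 < 5 at the first mismatch
  return 0;
}
```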
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | fwd_txfm_msa.c |
    15   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_fdct8x8_1_msa() local
    18   LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_fdct8x8_1_msa()
    19   ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);  in vpx_fdct8x8_1_msa()
    20   ADD2(in0, in2, in4, in6, in0, in4);  in vpx_fdct8x8_1_msa()
    31   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct8x16_1d_column() local
    44   LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,  in fdct8x16_1d_column()
    46   SLLI_4V(in0, in1, in2, in3, 2);  in fdct8x16_1d_column()
    50   ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);  in fdct8x16_1d_column()
    55   SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);  in fdct8x16_1d_column()
    150  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct16x8_1d_row() local
    [all …]
|
D | idct4x4_msa.c |
    16  v8i16 in0, in1, in2, in3;  in vpx_iwht4x4_16_add_msa() local
    20  LD4x4_SH(input, in0, in2, in3, in1);  in vpx_iwht4x4_16_add_msa()
    21  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in vpx_iwht4x4_16_add_msa()
    23  UNPCK_R_SH_SW(in2, in2_r);  in vpx_iwht4x4_16_add_msa()
    47  in2, in3);  in vpx_iwht4x4_16_add_msa()
    48  ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);  in vpx_iwht4x4_16_add_msa()
    73  v8i16 in0, in1, in2, in3;  in vpx_idct4x4_16_add_msa() local
    76  LD4x4_SH(input, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
    78  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
    79  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
    [all …]
|
D | idct8x8_msa.c |
    16  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_idct8x8_64_add_msa() local
    19  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
    22  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    25  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    28  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    31  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    34  SRARI_H4_SH(in0, in1, in2, in3, 5);  in vpx_idct8x8_64_add_msa()
    37  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);  in vpx_idct8x8_64_add_msa()
    44  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_idct8x8_12_add_msa() local
    50  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_12_add_msa()
    [all …]
|
D | macros_msa.h |
    205  #define SW4(in0, in1, in2, in3, pdst, stride) \  argument
    209    SW(in2, (pdst) + 2 * stride); \
    220  #define SD4(in0, in1, in2, in3, pdst, stride) \  argument
    224    SD(in2, (pdst) + 2 * stride); \
    323  #define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    326    ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    331  #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    333    ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    481  #define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \  argument
    484    out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
    [all …]
|
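SW4/SD4 and the ST_V* macros above all follow the same pattern: store N values to N consecutive rows separated by a byte stride. A portable scalar stand-in for SW4 is sketched below; the real macro issues MSA SW word stores, and this version only illustrates the addressing that is visible in the snippet.

```cpp
#include <cstdint>
#include <cstring>

// Scalar equivalent of the SW4(in0, in1, in2, in3, pdst, stride) pattern:
// four 32-bit words written to four rows that are `stride` bytes apart.
static inline void sw4_sketch(uint32_t in0, uint32_t in1, uint32_t in2,
                              uint32_t in3, uint8_t *pdst, int stride) {
  std::memcpy(pdst + 0 * stride, &in0, sizeof(uint32_t));
  std::memcpy(pdst + 1 * stride, &in1, sizeof(uint32_t));
  std::memcpy(pdst + 2 * stride, &in2, sizeof(uint32_t));
  std::memcpy(pdst + 3 * stride, &in3, sizeof(uint32_t));
}
```

ST_V4 and ST_V8 build the same ladder out of two-row stores, as the ST_V2/ST_V4 recursion visible at lines 326 and 333 of the header shows.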
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | dct_msa.c |
    14  #define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \  argument
    18    ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
    20    ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
    70  v8i16 in0, in1, in2, in3;  in vp8_short_fdct4x4_msa() local
    77  LD_SH4(input, pitch / 2, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
    78  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
    80  BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);  in vp8_short_fdct4x4_msa()
    83  in2 = temp0 - temp1;  in vp8_short_fdct4x4_msa()
    94  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
    96  BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);  in vp8_short_fdct4x4_msa()
    [all …]
|
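TRANSPOSE4x4_H in the entry above builds a 4x4 transpose of int16 rows out of interleave instructions (ILVR_H2_SH/ILVL_H2_SH). Its net effect is an ordinary matrix transpose, sketched here in scalar form; the MSA shuffle sequence itself is not reproduced.

```cpp
#include <cstdint>

// Net effect of TRANSPOSE4x4_H: out[r][c] = in[c][r] for a 4x4 block of int16.
static void transpose4x4_sketch(const int16_t in[4][4], int16_t out[4][4]) {
  for (int r = 0; r < 4; ++r) {
    for (int c = 0; c < 4; ++c) {
      out[r][c] = in[c][r];
    }
  }
}
```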
/external/libaom/libaom/aom_dsp/mips/ |
D | macros_msa.h |
    265  #define SW4(in0, in1, in2, in3, pdst, stride) \  argument
    269    SW(in2, (pdst) + 2 * stride); \
    280  #define SD4(in0, in1, in2, in3, pdst, stride) \  argument
    284    SD(in2, (pdst) + 2 * stride); \
    416  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    419    ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    423  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    425    ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    442  #define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    445    ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    [all …]
|
/external/deqp/external/vulkancts/modules/vulkan/spirv_assembly/ |
D | vktSpvAsmTrinaryMinMaxTests.cpp |
    196  static inline void runOpFunc (F f, void* out, const void* in1, const void* in2, const void* in3)  in runOpFunc() argument
    198  …*reinterpret_cast<T*>(out) = f(*reinterpret_cast<const T*>(in1), *reinterpret_cast<const T*>(in2),…  in runOpFunc()
    202  …void* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deInt8> (min3<deInt8>, …  in minInt8() argument
    203  …void* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deInt8> (max3<deInt8>, …  in maxInt8() argument
    204  …void* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deInt8> (mid3<deInt8>, …  in midInt8() argument
    205  …oid* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deUint8> (min3<deUint8>,…  in minUint8() argument
    206  …oid* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deUint8> (max3<deUint8>,…  in maxUint8() argument
    207  …oid* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deUint8> (mid3<deUint8>,…  in midUint8() argument
    208  …oid* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deInt16> (min3<deInt16>,…  in minInt16() argument
    209  …oid* out, const void* in1, const void* in2, const void* in3) { runOpFunc<deInt16> (max3<deInt16>,…  in maxInt16() argument
    [all …]
|
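Each dispatch function above forwards to runOpFunc<T> with one of the templated helpers min3, max3 or mid3, matching the trinary min/max/mid operations the SPIR-V test covers. Plausible definitions of those helpers are sketched below; the real implementations live elsewhere in the file and are not shown in the snippet.

```cpp
#include <algorithm>

template <typename T> T min3(T a, T b, T c) { return std::min(std::min(a, b), c); }
template <typename T> T max3(T a, T b, T c) { return std::max(std::max(a, b), c); }

// mid3 returns the median: the value left after dropping the smallest and
// largest of the three inputs.
template <typename T> T mid3(T a, T b, T c) {
  return std::max(std::min(a, b), std::min(std::max(a, b), c));
}
```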
/external/webp/src/dsp/ |
D | msa_macro.h |
    168  #define SW4(in0, in1, in2, in3, pdst, stride) do { \  argument
    174    SW(in2, ptmp); \
    179  #define SW3(in0, in1, in2, pdst, stride) do { \  argument
    185    SW(in2, ptmp); \
    202  #define SD4(in0, in1, in2, in3, pdst, stride) do { \  argument
    208    SD(in2, ptmp); \
    303  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \  argument
    305    ST_B2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
    310  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
    312    ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    [all …]
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | vp8_macros_msa.h |
    238  #define SW4(in0, in1, in2, in3, pdst, stride) \  argument
    242    SW(in2, (pdst) + 2 * stride); \
    253  #define SD4(in0, in1, in2, in3, pdst, stride) \  argument
    257    SD(in2, (pdst) + 2 * stride); \
    352  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
    355    ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
    360  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    362    ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
    549  #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \  argument
    552    out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | aggregate_ops_gpu.cu.cc |
    37  typename TTypes<T>::ConstFlat in2) {  in operator ()()
    38  Add2EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2);  in operator ()()
    46  typename TTypes<T>::ConstFlat in2,  in operator ()()
    48  Add3EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3);  in operator ()()
    56  typename TTypes<T>::ConstFlat in2,  in operator ()()
    59  Add4EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4);  in operator ()()
    67  typename TTypes<T>::ConstFlat in2,  in operator ()()
    71  Add5EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);  in operator ()()
    79  typename TTypes<T>::ConstFlat in2,  in operator ()()
    84  Add6EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);  in operator ()()
    [all …]
|
D | aggregate_ops_cpu.h |
    36  typename TTypes<T>::ConstFlat in2) {
    37  Add2EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2);
    44  typename TTypes<T>::ConstFlat in2,
    46  Add3EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3);
    53  typename TTypes<T>::ConstFlat in2,
    56  Add4EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4);
    63  typename TTypes<T>::ConstFlat in2,
    67  Add5EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);
    74  typename TTypes<T>::ConstFlat in2,
    79  Add6EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);
    [all …]
|
D | aggregate_ops.h |
    32  typename TTypes<T>::ConstFlat in2);
    39  typename TTypes<T>::ConstFlat in2) {  in Compute()
    40  out.device(d) = in1 + in2;  in Compute()
    48  typename TTypes<T>::ConstFlat in2,
    56  typename TTypes<T>::ConstFlat in2,  in Compute()
    58  out.device(d) = in1 + in2 + in3;  in Compute()
    66  typename TTypes<T>::ConstFlat in2,
    75  typename TTypes<T>::ConstFlat in2,  in Compute()
    78  out.device(d) = in1 + in2 + in3 + in4;  in Compute()
    86  typename TTypes<T>::ConstFlat in2,
    [all …]
|
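The three TensorFlow entries above show the same Add2..AddN functor contract on GPU, CPU, and in the device-independent header: given a device and N flat input tensors, write the elementwise sum to out through a single fused Eigen expression (out.device(d) = in1 + in2 + ...). Stripped of Eigen, the contract reduces to roughly the following stand-in; the class name and the explicit loop are ours, and only the shape of the interface comes from the snippets.

```cpp
#include <cstddef>

// Plain-C++ stand-in for the Add3 functor contract shown above:
// out[i] = in1[i] + in2[i] + in3[i] for every element. The real kernels let
// Eigen fuse the additions and dispatch them on a CPU or GPU device.
template <typename T>
struct Add3Sketch {
  void operator()(T* out, const T* in1, const T* in2, const T* in3,
                  std::size_t n) const {
    for (std::size_t i = 0; i < n; ++i) {
      out[i] = in1[i] + in2[i] + in3[i];
    }
  }
};
```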
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | fwd_txfm_sse2.c |
    50   __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));  in vpx_fdct8x8_1_sse2() local
    55   u1 = _mm_add_epi16(in2, in3);  in vpx_fdct8x8_1_sse2()
    59   in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));  in vpx_fdct8x8_1_sse2()
    65   in2 = _mm_add_epi16(in2, in3);  in vpx_fdct8x8_1_sse2()
    69   sum = _mm_add_epi16(sum, in2);  in vpx_fdct8x8_1_sse2()
    89   __m128i in0, in1, in2, in3;  in vpx_fdct16x16_1_sse2() local
    97   in2 = _mm_load_si128((const __m128i *)(input + 1 * stride + 0));  in vpx_fdct16x16_1_sse2()
    101  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct16x16_1_sse2()
    106  in2 = _mm_load_si128((const __m128i *)(input + 3 * stride + 0));  in vpx_fdct16x16_1_sse2()
    111  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct16x16_1_sse2()
    [all …]
|
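vpx_fdct8x8_1_sse2 (and the 16x16 variant visible below it) computes only the DC coefficient, so all it needs is the sum of the input block; the SSE2 code accumulates that sum two rows at a time with _mm_add_epi16. A scalar sketch of the accumulation follows, with the final scaling and store left out because they are hidden behind "[all …]" in the snippet.

```cpp
#include <cstdint>

// Sum of an 8x8 block of int16 samples; `stride` is the row pitch in elements.
static int32_t fdct8x8_1_sum_sketch(const int16_t *input, int stride) {
  int32_t sum = 0;
  for (int r = 0; r < 8; ++r) {
    for (int c = 0; c < 8; ++c) {
      sum += input[r * stride + c];
    }
  }
  return sum;
}
```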
/external/eigen/unsupported/test/ |
D | cxx11_tensor_device.cu |
    27   …Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out) : in1_(in1), in…  in CPUContext()
    49   const Eigen::Tensor<float, 3>& in2() const { return in2_; }  in in2() function
    70   …::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out) : in1…  in GPUContext()
    92   const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; }  in in2() function
    116  …context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1(…  in test_contextual_eval()
    122  …context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + cont…  in test_forced_contextual_eval()
    129  context->out().device(context->device()) += context->in1() + context->in2() * 3.14f;  in test_compound_assignment()
    145  …e).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims);  in test_contraction()
    182  Eigen::Tensor<float, 3> in2(40,50,70);  in test_cpu() local
    186  in2 = in2.random() + in2.constant(10.0f);  in test_cpu()
    [all …]
|
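Both test contexts above (CPUContext and GPUContext) evaluate tensor expressions of the form out = in1 + in2 * 3.14f + … through the device() interface. A CPU-only reduction of that pattern, using the unsupported Eigen Tensor module and the same initialization style as the test (with smaller, made-up dimensions), would be roughly:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 3> in1(4, 5, 7);
  Eigen::Tensor<float, 3> in2(4, 5, 7);
  Eigen::Tensor<float, 3> out(4, 5, 7);

  in1.setRandom();
  in2 = in2.random() + in2.constant(10.0f);  // same init pattern as the test

  // Core of what test_contextual_eval() evaluates on each device.
  out = in1 + in2 * 3.14f;
  return 0;
}
```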
D | cxx11_tensor_forced_eval_sycl.cpp |
    32  Eigen::Tensor<float, 3> in2(tensorRange);  in test_forced_eval_sycl() local
    36  …float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*size…  in test_forced_eval_sycl()
    40  in2 = in2.random() + in2.constant(10.0f);  in test_forced_eval_sycl()
    47  …sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(floa…  in test_forced_eval_sycl()
    55  (in1(i, j, k) + in2(i, j, k)) * in2(i, j, k));  in test_forced_eval_sycl()
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | fcmp64.ll |
    7   double addrspace(1)* %in2) {
    9   %r1 = load double, double addrspace(1)* %in2
    19  double addrspace(1)* %in2) {
    21  %r1 = load double, double addrspace(1)* %in2
    31  double addrspace(1)* %in2) {
    33  %r1 = load double, double addrspace(1)* %in2
    43  double addrspace(1)* %in2) {
    45  %r1 = load double, double addrspace(1)* %in2
    55  double addrspace(1)* %in2) {
    57  %r1 = load double, double addrspace(1)* %in2
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | fma.f64.ll |
    12  double addrspace(1)* %in2, double addrspace(1)* %in3) {
    14  %r1 = load double, double addrspace(1)* %in2
    25  <2 x double> addrspace(1)* %in2, <2 x double> addrspace(1)* %in3) {
    27  %r1 = load <2 x double>, <2 x double> addrspace(1)* %in2
    40  <4 x double> addrspace(1)* %in2, <4 x double> addrspace(1)* %in3) {
    42  %r1 = load <4 x double>, <4 x double> addrspace(1)* %in2
    52  double addrspace(1)* %in2, double addrspace(1)* %in3) {
    54  %r1 = load double, double addrspace(1)* %in2
    65  double addrspace(1)* %in2, double addrspace(1)* %in3) {
    67  %r1 = load double, double addrspace(1)* %in2
    [all …]
|
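These AMDGPU CodeGen tests load their operands from %in1, %in2 and %in3, form a fused multiply-add on doubles (scalar and <2 x double>/<4 x double> vectors), and check the machine instructions the backend selects. In C++ terms the scalar operation being lowered corresponds to std::fma, i.e. a*b + c computed with a single rounding; the values below are illustrative only.

```cpp
#include <cassert>
#include <cmath>

int main() {
  const double a = 1.5, b = 2.0, c = 0.25;
  // Fused multiply-add semantics: one rounding for the whole a*b + c.
  assert(std::fma(a, b, c) == 3.25);
  return 0;
}
```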