
Searched refs: v4i32 (Results 1 – 25 of 802) sorted by relevance


/external/gemmlowp/fixedpoint/
fixedpoint_msa.h 26 struct FixedPointRawTypeTraits<v4i32> {
38 inline v4i32 BitAnd(v4i32 a, v4i32 b) {
39 return reinterpret_cast<v4i32>(__builtin_msa_and_v(reinterpret_cast<v16u8>(a),
50 inline v4i32 BitOr(v4i32 a, v4i32 b) {
51 return reinterpret_cast<v4i32>(__builtin_msa_or_v(reinterpret_cast<v16u8>(a),
62 inline v4i32 BitXor(v4i32 a, v4i32 b) {
63 return reinterpret_cast<v4i32>(__builtin_msa_xor_v(reinterpret_cast<v16u8>(a),
74 inline v4i32 BitNot(v4i32 a) {
75 return reinterpret_cast<v4i32>(__builtin_msa_nor_v(reinterpret_cast<v16u8>(a),
86 inline v4i32 Add(v4i32 a, v4i32 b) {
[all …]
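MSA's bitwise instructions (and.v, or.v, xor.v, nor.v) operate on untyped 128-bit registers, which is why the code above round-trips every v4i32 operand through v16u8. A minimal sketch of the same pattern, assuming a MIPS toolchain with MSA enabled (-mmsa) and <msa.h>; the helper names are illustrative, not gemmlowp's:

    #include <msa.h>

    // and.v is untyped, so v4i32 operands are viewed as v16u8 and the
    // result is cast back to v4i32.
    inline v4i32 BitAndV4I32(v4i32 a, v4i32 b) {
      return reinterpret_cast<v4i32>(__builtin_msa_and_v(
          reinterpret_cast<v16u8>(a), reinterpret_cast<v16u8>(b)));
    }

    // Bitwise NOT is nor.v of a value with itself.
    inline v4i32 BitNotV4I32(v4i32 a) {
      return reinterpret_cast<v4i32>(__builtin_msa_nor_v(
          reinterpret_cast<v16u8>(a), reinterpret_cast<v16u8>(a)));
    }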
/external/llvm/test/CodeGen/PowerPC/
vaddsplat.ll 8 %v4i32 = type <4 x i32>
12 define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
13 %p = load %v4i32, %v4i32* %P
14 %r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
15 store %v4i32 %r, %v4i32* %S
23 define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
24 %p = load %v4i32, %v4i32* %P
25 %r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
26 store %v4i32 %r, %v4i32* %S
78 define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
[all …]
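vaddsplat.ll feeds the backend adds of splatted constants (note the even/odd, positive/negative variants) to check how PowerPC materializes them. A rough C analogue of the IR above, assuming GCC/Clang generic vector extensions (the typedef and function names are illustrative):

    typedef int v4i32_t __attribute__((vector_size(16)));

    // Clang lowers this to `%r = add <4 x i32> %p, <i32 18, i32 18,
    // i32 18, i32 18>` plus a store -- the shape of test_v4i32_pos_even.
    void add_splat_18(const v4i32_t *P, v4i32_t *S) {
      *S = *P + 18;  // the scalar 18 is broadcast across all four lanes
    }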
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
vaddsplat.ll 8 %v4i32 = type <4 x i32>
12 define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
13 %p = load %v4i32, %v4i32* %P
14 %r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
15 store %v4i32 %r, %v4i32* %S
23 define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
24 %p = load %v4i32, %v4i32* %P
25 %r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
26 store %v4i32 %r, %v4i32* %S
78 define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
[all …]
/external/libyuv/files/source/
rotate_msa.cc 40 out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0); \
41 out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0); \
42 out2 = (v16u8)__msa_ilvr_w((v4i32)in3, (v4i32)in2); \
43 out3 = (v16u8)__msa_ilvl_w((v4i32)in3, (v4i32)in2); \
132 res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0); in TransposeWx16_MSA()
133 res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0); in TransposeWx16_MSA()
137 res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1); in TransposeWx16_MSA()
138 res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1); in TransposeWx16_MSA()
142 res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2); in TransposeWx16_MSA()
143 res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2); in TransposeWx16_MSA()
[all …]
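The ilvr_w/ilvl_w pairs above are the word-interleave steps of a 4x4 transpose. A minimal sketch of one such step, assuming -mmsa and <msa.h> (the helper name is illustrative):

    #include <msa.h>

    // ilvr.w interleaves the low words of two rows, ilvl.w the high words:
    //   lo = {in0[0], in1[0], in0[1], in1[1]}
    //   hi = {in0[2], in1[2], in0[3], in1[3]}
    static inline void InterleaveWords(v4i32 in0, v4i32 in1,
                                       v4i32 *lo, v4i32 *hi) {
      *lo = __msa_ilvr_w(in1, in0);
      *hi = __msa_ilvl_w(in1, in0);
    }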
/external/libvpx/libvpx/third_party/libyuv/source/
rotate_msa.cc 40 out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0); \
41 out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0); \
42 out2 = (v16u8)__msa_ilvr_w((v4i32)in3, (v4i32)in2); \
43 out3 = (v16u8)__msa_ilvl_w((v4i32)in3, (v4i32)in2); \
132 res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0); in TransposeWx16_MSA()
133 res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0); in TransposeWx16_MSA()
137 res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1); in TransposeWx16_MSA()
138 res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1); in TransposeWx16_MSA()
142 res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2); in TransposeWx16_MSA()
143 res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2); in TransposeWx16_MSA()
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/PowerPC/
PPCInstrAltivec.td 408 [(int_ppc_altivec_mtvscr v4i32:$vB)]>;
419 [(set v4i32:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
422 [(set v4i32:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
425 [(set v4i32:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
446 [(int_ppc_altivec_stvewx v4i32:$rS, xoaddr:$dst)]>;
449 [(int_ppc_altivec_stvx v4i32:$rS, xoaddr:$dst)]>;
452 [(int_ppc_altivec_stvxl v4i32:$rS, xoaddr:$dst)]>;
476 v4i32, v4i32, v16i8>;
477 def VSEL : VA1a_Int_Ty<42, "vsel", int_ppc_altivec_vsel, v4i32>;
499 [(set v4i32:$vD, (add v4i32:$vA, v4i32:$vB))]>;
[all …]
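Patterns like (set v4i32:$vD, (add v4i32:$vA, v4i32:$vB)) are what instruction selection matches when source code adds two integer vectors; on AltiVec that add selects vadduwm. A sketch assuming a PowerPC toolchain with -maltivec:

    #include <altivec.h>

    // vec_add on vector int lowers to the generic `add v4i32` node, which
    // the pattern above selects to vadduwm.
    __vector int AddWords(__vector int a, __vector int b) {
      return vec_add(a, b);
    }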
PPCInstrVSX.td 515 int_ppc_vsx_xvcmpeqsp, v4i32, v4f32>;
521 int_ppc_vsx_xvcmpgesp, v4i32, v4f32>;
527 int_ppc_vsx_xvcmpgtsp, v4i32, v4f32>;
647 [(set v4i32:$XT, (int_ppc_vsx_xvcvdpsxws v2f64:$XB))]>;
655 [(set v4i32:$XT, (int_ppc_vsx_xvcvdpuxws v2f64:$XB))]>;
667 [(set v4i32:$XT, (fp_to_sint v4f32:$XB))]>;
674 [(set v4i32:$XT, (fp_to_uint v4f32:$XB))]>;
686 [(set v2f64:$XT, (int_ppc_vsx_xvcvsxwdp v4i32:$XB))]>;
690 [(set v4f32:$XT, (sint_to_fp v4i32:$XB))]>;
702 [(set v2f64:$XT, (int_ppc_vsx_xvcvuxwdp v4i32:$XB))]>;
[all …]
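The fp_to_sint/sint_to_fp patterns above are per-lane float/int conversions. In C they can be written with __builtin_convertvector (Clang, and GCC 9+); on a VSX target the float-to-int case should select xvcvspsxws. Typedef names are illustrative:

    typedef int   v4i32_t __attribute__((vector_size(16)));
    typedef float v4f32_t __attribute__((vector_size(16)));

    // Truncating conversion of each float lane to a signed 32-bit word,
    // i.e. the (fp_to_sint v4f32) node in the pattern above.
    v4i32_t FloatToSignedWords(v4f32_t x) {
      return __builtin_convertvector(x, v4i32_t);
    }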
/external/swiftshader/third_party/LLVM/test/CodeGen/CellSPU/useful-harnesses/
vecoperations.c 5 typedef int v4i32 __attribute__((ext_vector_type(4)));
50 void print_v4i32(const char *str, v4i32 v) { in print_v4i32()
76 v4i32 v4i32_shuffle_1(v4i32 a) { in v4i32_shuffle_1()
77 v4i32 c2 = a.yzwx; in v4i32_shuffle_1()
81 v4i32 v4i32_shuffle_2(v4i32 a) { in v4i32_shuffle_2()
82 v4i32 c2 = a.zwxy; in v4i32_shuffle_2()
86 v4i32 v4i32_shuffle_3(v4i32 a) { in v4i32_shuffle_3()
87 v4i32 c2 = a.wxyz; in v4i32_shuffle_3()
91 v4i32 v4i32_shuffle_4(v4i32 a) { in v4i32_shuffle_4()
92 v4i32 c2 = a.xyzw; in v4i32_shuffle_4()
[all …]
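ext_vector_type enables OpenCL-style swizzles, so a.yzwx above is a lane rotation that should compile to a single shuffle. A sketch assuming Clang (the typedef name is illustrative):

    typedef int v4i32_e __attribute__((ext_vector_type(4)));

    // Rotate the four lanes left by one: {a.y, a.z, a.w, a.x}.
    v4i32_e rotate_lanes(v4i32_e a) {
      return a.yzwx;
    }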
/external/llvm/lib/Target/PowerPC/
PPCInstrAltivec.td 407 [(int_ppc_altivec_mtvscr v4i32:$vB)]>;
418 [(set v4i32:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
421 [(set v4i32:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
424 [(set v4i32:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
445 [(int_ppc_altivec_stvewx v4i32:$rS, xoaddr:$dst)]>;
448 [(int_ppc_altivec_stvx v4i32:$rS, xoaddr:$dst)]>;
451 [(int_ppc_altivec_stvxl v4i32:$rS, xoaddr:$dst)]>;
475 v4i32, v4i32, v16i8>;
476 def VSEL : VA1a_Int_Ty<42, "vsel", int_ppc_altivec_vsel, v4i32>;
498 [(set v4i32:$vD, (add v4i32:$vA, v4i32:$vB))]>;
[all …]
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/
CellSDKIntrinsics.td 43 [(set (v4i32 VECREG:$rT), (int_spu_si_mpy (v8i16 VECREG:$rA),
49 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyu (v8i16 VECREG:$rA),
55 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyi (v8i16 VECREG:$rA),
61 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyui (v8i16 VECREG:$rA),
67 [(set (v4i32 VECREG:$rT), (int_spu_si_mpya (v8i16 VECREG:$rA),
74 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyh (v4i32 VECREG:$rA),
80 [(set (v4i32 VECREG:$rT), (int_spu_si_mpys (v8i16 VECREG:$rA),
86 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyhh (v8i16 VECREG:$rA),
92 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyhha (v8i16 VECREG:$rA),
100 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyhhu (v8i16 VECREG:$rA),
[all …]
/external/webp/src/dsp/
msa_macro.h 27 #define ADDVI_W(a, b) __msa_addvi_w((v4i32)a, b)
30 #define SRAI_W(a, b) __msa_srai_w((v4i32)a, b)
32 #define SLLI_B(a, b) __msa_slli_b((v4i32)a, b)
58 #define LD_SW(...) LD_W(v4i32, __VA_ARGS__)
70 #define ST_SW(...) ST_W(v4i32, __VA_ARGS__)
275 #define LD_SW2(...) LD_W2(v4i32, __VA_ARGS__)
282 #define LD_SW3(...) LD_W3(v4i32, __VA_ARGS__)
289 #define LD_SW4(...) LD_W4(v4i32, __VA_ARGS__)
329 #define ST_SW2(...) ST_W2(v4i32, __VA_ARGS__)
336 #define ST_SW3(...) ST_W3(v4i32, __VA_ARGS__)
[all …]
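The LD_SW/ST_SW family wraps the typed MSA load/store builtins behind a cast to the requested element type. A minimal sketch of the same idea, assuming -mmsa (the macro names are illustrative, not libwebp's):

    #include <msa.h>

    // Load/store one 128-bit vector as four 32-bit words; the immediate
    // byte offset to __msa_ld_w/__msa_st_w must be a compile-time constant.
    #define LD_W_AS(RTYPE, psrc) ((RTYPE)__msa_ld_w((const void *)(psrc), 0))
    #define ST_W_AS(in, pdst) __msa_st_w((v4i32)(in), (void *)(pdst), 0)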
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h 29 #define LD_SW(...) LD_W(v4i32, __VA_ARGS__)
40 #define ST_SW(...) ST_W(v4i32, __VA_ARGS__)
433 out0_m = __msa_copy_u_w((v4i32)in0, idx0); \
434 out1_m = __msa_copy_u_w((v4i32)in0, idx1); \
435 out2_m = __msa_copy_u_w((v4i32)in1, idx2); \
436 out3_m = __msa_copy_u_w((v4i32)in1, idx3); \
652 #define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)
666 out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \
667 out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \
708 out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
[all …]
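__msa_copy_u_w extracts one 32-bit lane into a general-purpose register, which is how the macros above scatter words to destinations that may be unaligned. A sketch assuming -mmsa (the helper name is illustrative):

    #include <msa.h>
    #include <cstdint>
    #include <cstring>

    // Extract lane 0 to a GPR and store it bytewise; the lane index passed
    // to __msa_copy_u_w must be a compile-time constant.
    static inline void StoreWordLane0(v4i32 v, uint8_t *dst) {
      uint32_t w = __msa_copy_u_w(v, 0);
      std::memcpy(dst, &w, sizeof(w));
    }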
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
si-spill-cf.ll 12 %tmp = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 16)
13 %tmp1 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 32)
14 %tmp2 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 80)
15 %tmp3 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 84)
16 %tmp4 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 88)
17 %tmp5 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 96)
18 %tmp6 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 100)
19 %tmp7 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 104)
20 %tmp8 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 112)
21 %tmp9 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 116)
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
macros_msa.h 24 #define LD_SW(...) LD_V(v4i32, __VA_ARGS__)
30 #define ST_SW(...) ST_V(v4i32, __VA_ARGS__)
243 #define LD_SW2(...) LD_V2(v4i32, __VA_ARGS__)
321 #define ST_SW2(...) ST_V2(v4i32, __VA_ARGS__)
378 out0_m = __msa_copy_u_w((v4i32)in, 0); \
379 out1_m = __msa_copy_u_w((v4i32)in, 1); \
401 out0_m = __msa_copy_u_w((v4i32)in0, idx0); \
402 out1_m = __msa_copy_u_w((v4i32)in0, idx1); \
403 out2_m = __msa_copy_u_w((v4i32)in1, idx2); \
404 out3_m = __msa_copy_u_w((v4i32)in1, idx3); \
[all …]
/external/llvm/test/CodeGen/AMDGPU/
llvm.amdgcn.image.atomic.ll 10 …%orig = call i32 @llvm.amdgcn.image.atomic.swap.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1…
44 …%orig = call i32 @llvm.amdgcn.image.atomic.cmpswap.v4i32(i32 %2, i32 %3, <4 x i32> %1, <8 x i32> %…
55 …%orig = call i32 @llvm.amdgcn.image.atomic.add.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 …
66 …%orig = call i32 @llvm.amdgcn.image.atomic.sub.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 …
92 …%t0 = call i32 @llvm.amdgcn.image.atomic.smin.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0…
93 …%t1 = call i32 @llvm.amdgcn.image.atomic.umin.v4i32(i32 %t0, <4 x i32> %1, <8 x i32> %0, i1 0, i1 …
94 …%t2 = call i32 @llvm.amdgcn.image.atomic.smax.v4i32(i32 %t1, <4 x i32> %1, <8 x i32> %0, i1 0, i1 …
95 …%t3 = call i32 @llvm.amdgcn.image.atomic.umax.v4i32(i32 %t2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 …
96 …%t4 = call i32 @llvm.amdgcn.image.atomic.and.v4i32(i32 %t3, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0…
97 …%t5 = call i32 @llvm.amdgcn.image.atomic.or.v4i32(i32 %t4, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0,…
[all …]
llvm.SI.image.sample.ll 9 …%r = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> unde…
23 …%r = call <4 x float> @llvm.SI.image.sample.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> u…
37 …%r = call <4 x float> @llvm.SI.image.sample.d.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> un…
51 …%r = call <4 x float> @llvm.SI.image.sample.d.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32>…
65 …%r = call <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> un…
79 …%r = call <4 x float> @llvm.SI.image.sample.b.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> un…
93 …%r = call <4 x float> @llvm.SI.image.sample.b.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32>…
107 …%r = call <4 x float> @llvm.SI.image.sample.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> u…
121 …%r = call <4 x float> @llvm.SI.image.sample.cd.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> u…
135 …%r = call <4 x float> @llvm.SI.image.sample.cd.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32…
[all …]
llvm.SI.image.sample.o.ll 9 …%r = call <4 x float> @llvm.SI.image.sample.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> un…
23 …%r = call <4 x float> @llvm.SI.image.sample.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32>…
37 …%r = call <4 x float> @llvm.SI.image.sample.d.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> …
51 …%r = call <4 x float> @llvm.SI.image.sample.d.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i3…
65 …%r = call <4 x float> @llvm.SI.image.sample.l.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> …
79 …%r = call <4 x float> @llvm.SI.image.sample.b.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> …
93 …%r = call <4 x float> @llvm.SI.image.sample.b.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i3…
107 …%r = call <4 x float> @llvm.SI.image.sample.lz.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32>…
121 …%r = call <4 x float> @llvm.SI.image.sample.cd.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32>…
135 …%r = call <4 x float> @llvm.SI.image.sample.cd.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i…
[all …]
/external/llvm/test/Transforms/InstCombine/
2012-04-23-Neon-Intrinsics.ll 5 …%a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> zeroinitializer) noun…
13 …%a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1,…
22 …%a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x …
30 …%b = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <…
38 …%b = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <…
46 …%a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x …
50 ; CHECK-NEXT: %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 2, i16 2, i16 2, …
56 …%a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x …
64 declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
65 declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
[all …]
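llvm.arm.neon.vmulls.v4i32 is the widening multiply behind vmull_s16; the tests above check that InstCombine folds calls whose operands are constants (multiply by zero, one, and so on). A sketch assuming an ARM target with <arm_neon.h>:

    #include <arm_neon.h>

    // Each 16-bit lane of x is multiplied by 3 and widened to 32 bits --
    // the same intrinsic the folds above operate on.
    int32x4_t widening_mul_by_3(int16x4_t x) {
      return vmull_s16(x, vdup_n_s16(3));
    }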
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/
scalarize.ll 13 declare %i4 @llvm.bswap.v4i32(%i4)
16 declare %i4 @llvm.cttz.v4i32(%i4)
24 ; CHECK32: cost of 1 {{.*}}bswap.v4i32
25 ; CHECK64: cost of 1 {{.*}}bswap.v4i32
26 %r2 = call %i4 @llvm.bswap.v4i32(%i4 undef)
31 ; CHECK32: cost of 14 {{.*}}cttz.v4i32
32 ; CHECK64: cost of 14 {{.*}}cttz.v4i32
33 %r4 = call %i4 @llvm.cttz.v4i32(%i4 undef)
/external/llvm/test/Analysis/CostModel/X86/
scalarize.ll 13 declare %i4 @llvm.bswap.v4i32(%i4)
16 declare %i4 @llvm.cttz.v4i32(%i4)
24 ; CHECK32: cost of 1 {{.*}}bswap.v4i32
25 ; CHECK64: cost of 1 {{.*}}bswap.v4i32
26 %r2 = call %i4 @llvm.bswap.v4i32(%i4 undef)
31 ; CHECK32: cost of 12 {{.*}}cttz.v4i32
32 ; CHECK64: cost of 12 {{.*}}cttz.v4i32
33 %r4 = call %i4 @llvm.cttz.v4i32(%i4 undef)
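The cost gap above (1 for bswap.v4i32 versus 12-14 for cttz.v4i32, depending on LLVM version) reflects scalarization: without a native vector cttz, each lane is extracted, counted, and reinserted. A scalar sketch of what the backend effectively emits, assuming GCC/Clang vector extensions:

    #include <cstdint>

    typedef int32_t v4i32_t __attribute__((vector_size(16)));

    // Per-lane trailing-zero count; __builtin_ctz(0) is undefined, so zero
    // lanes are mapped to the bit width explicitly.
    v4i32_t cttz_per_lane(v4i32_t v) {
      v4i32_t r;
      for (int i = 0; i < 4; ++i)
        r[i] = v[i] ? __builtin_ctz((uint32_t)v[i]) : 32;
      return r;
    }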
/external/libaom/libaom/aom_dsp/mips/
macros_msa.h 30 #define LD_SW(...) LD_W(v4i32, __VA_ARGS__)
40 #define ST_SW(...) ST_W(v4i32, __VA_ARGS__)
506 out0_m = __msa_copy_u_w((v4i32)in, 0); \
507 out1_m = __msa_copy_u_w((v4i32)in, 1); \
529 out0_m = __msa_copy_u_w((v4i32)in0, idx0); \
530 out1_m = __msa_copy_u_w((v4i32)in0, idx1); \
531 out2_m = __msa_copy_u_w((v4i32)in1, idx2); \
532 out3_m = __msa_copy_u_w((v4i32)in1, idx3); \
637 #define SLDI_B2_0_SW(...) SLDI_B2_0(v4i32, __VA_ARGS__)
761 #define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)
[all …]
/external/gemmlowp/internal/
output_msa.h 40 v4i32 tmp = __builtin_msa_sat_s_w(input.reg[0], 8);
42 tmp = reinterpret_cast<v4i32>(__builtin_msa_pckev_h(
51 tmp = reinterpret_cast<v4i32>(__builtin_msa_pckev_b(
73 v4i32 tmp_lo = __builtin_msa_sat_s_w(input.reg[0], 8);
74 v4i32 tmp_hi = __builtin_msa_sat_s_w(input.reg[1], 8);
77 tmp_lo = reinterpret_cast<v4i32>(__builtin_msa_pckev_h(
86 tmp_lo = reinterpret_cast<v4i32>(__builtin_msa_pckev_b(
97 v4i32 tmp0 = __builtin_msa_sat_s_w(in0, 8); \
98 v4i32 tmp1 = __builtin_msa_sat_s_w(in1, 8); \
99 v4i32 tmp2 = __builtin_msa_sat_s_w(in2, 8); \
[all …]
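The sequence above narrows 32-bit accumulators to 8 bits: sat_s_w clamps each word, then pckev_h and pckev_b keep the even-indexed elements to halve the width twice. A minimal sketch under the same assumptions (-mmsa, <msa.h>; the helper name is illustrative, and lane order assumes a little-endian target):

    #include <msa.h>

    static inline v16i8 NarrowS32ToS8(v4i32 lo, v4i32 hi) {
      lo = __builtin_msa_sat_s_w(lo, 7);  // clamp each word to [-128, 127]
      hi = __builtin_msa_sat_s_w(hi, 7);
      // Keep the low halfword of every word (32 -> 16 bit)...
      v8i16 h = __builtin_msa_pckev_h(reinterpret_cast<v8i16>(hi),
                                      reinterpret_cast<v8i16>(lo));
      // ...then the low byte of every halfword (16 -> 8 bit); the 8 results
      // land in the low byte lanes and are mirrored in the high lanes.
      return __builtin_msa_pckev_b(reinterpret_cast<v16i8>(h),
                                   reinterpret_cast<v16i8>(h));
    }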
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/AArch64/
AArch64GenDAGISel.inc 207 /* 324*/ OPC_CheckChild0Type, MVT::v4i32,
223 …// Src: (st (vector_extract:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, 0:{ *:[iPTR] }), (ro_Wind…
224 …// Dst: (STRSroW (EXTRACT_SUBREG:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, ssub:{ *:[i32] }), G…
233 …// Src: (st (vector_extract:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, 0:{ *:[iPTR] }), (ro_Xind…
234 …// Dst: (STRSroX (EXTRACT_SUBREG:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, ssub:{ *:[i32] }), G…
243 …// Src: (st (vector_extract:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, 0:{ *:[iPTR] }), (am_inde…
244 …// Dst: (STRSui (EXTRACT_SUBREG:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, ssub:{ *:[i32] }), GP…
253 …// Src: (st (vector_extract:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, 0:{ *:[iPTR] }), (am_unsc…
254 …// Dst: (STURSi (EXTRACT_SUBREG:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, ssub:{ *:[i32] }), GP…
273 …// Src: (st (vector_extract:{ *:[i32] } VecListOne128:{ *:[v4i32] }:$Vt, (imm:{ *:[i64] })<<P:Pred…
[all …]
/external/clang/test/CodeGen/
mips-vector-arg.c 9 typedef int v4i32 __attribute__ ((__vector_size__ (16)));
24 extern test_v4i32_2(v4i32, int, v4i32);
25 void test_v4i32(v4i32 a1, int a2, v4i32 a3) { in test_v4i32()
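The test checks how a 16-byte vector and an interleaved scalar are passed under the MIPS calling convention. The generic vector_size attribute form is portable across GCC and Clang (names illustrative):

    typedef int v4i32_t __attribute__((__vector_size__(16)));

    // Two 16-byte vectors with a scalar between them exercise the same
    // argument-lowering path the test above inspects.
    v4i32_t pass_vectors(v4i32_t a1, int a2, v4i32_t a3) {
      return a1 + a3 + a2;  // the scalar a2 is broadcast across lanes
    }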
/external/libvpx/libvpx/vp8/encoder/mips/msa/
dct_msa.c 48 v4i32 tmp0_m; \
49 v4i32 one_m = __msa_ldi_w(1); \
60 v4i32 tmp0_m; \
62 v4i32 one_m = __msa_ldi_w(1); \
74 v4i32 out0, out1, out2, out3; in vp8_short_fdct4x4_msa()
87 out0 = (v4i32)__msa_ilvev_h(zero, in1); in vp8_short_fdct4x4_msa()
89 out1 = __msa_splati_w((v4i32)coeff, 0); in vp8_short_fdct4x4_msa()
106 out1 = __msa_splati_w((v4i32)coeff, 1); in vp8_short_fdct4x4_msa()
110 out1 += (v4i32)temp1; in vp8_short_fdct4x4_msa()
121 v4i32 vec0_w, vec1_w, vec2_w, vec3_w; in vp8_short_fdct8x4_msa()
[all …]
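__msa_ldi_w materializes a small immediate in every lane and __msa_splati_w broadcasts one existing lane; both feed the rounding steps in the DCT above. A sketch of the first idiom, assuming -mmsa (names illustrative):

    #include <msa.h>

    // Add a rounding bit to each 32-bit lane before a shift, as the macros
    // above appear to do with `one_m = __msa_ldi_w(1)`.
    static inline v4i32 add_round_bit(v4i32 acc) {
      v4i32 one = __msa_ldi_w(1);  // {1, 1, 1, 1}
      return acc + one;            // native operators work on MSA types
    }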
