/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/GlobalISel/
  legalize-add-v512.mir
     52: ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
     54: …:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD…
     92: ; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]]
     94: …_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD…
    132: ; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]]
    134: …_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD…
    168: ; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]]
    170: …:_(<8 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD…
    216: ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
    219: ; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
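The legalize-add-v512.mir checks above describe how GlobalISel legalizes a 512-bit vector add on an AVX1 subtarget: each operand is unmerged into four 128-bit pieces, the pieces are added with separate G_ADDs (ADD2 being the third), and the results are re-merged with G_MERGE_VALUES. A scalar C sketch of that split for the <64 x s8> case, purely illustrative and not taken from the test (function and parameter names are invented):

    #include <stdint.h>

    /* Split a 64-lane byte add into four 16-lane adds, mirroring
     * G_UNMERGE_VALUES -> four G_ADDs -> G_MERGE_VALUES.          */
    void add_v64i8_split(const uint8_t a[64], const uint8_t b[64], uint8_t out[64]) {
      for (int piece = 0; piece < 4; ++piece)     /* four <16 x s8> sub-vectors */
        for (int lane = 0; lane < 16; ++lane)     /* lane-wise add per piece    */
          out[piece * 16 + lane] = (uint8_t)(a[piece * 16 + lane] + b[piece * 16 + lane]);
      /* Storing the pieces back-to-back plays the role of G_MERGE_VALUES. */
    }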
/external/libvpx/libvpx/vpx_dsp/mips/
  fwd_dct32x32_msa.c
     89: ADD2(vec4, vec5, vec7, vec6, vec0, vec1);  in fdct8x32_1d_column_even_store()
    105: ADD2(in0, in1, in2, in3, vec0, vec7);  in fdct8x32_1d_column_even_store()
    125: ADD2(in3, in2, in0, in1, vec3, vec4);  in fdct8x32_1d_column_even_store()
    186: ADD2(in27, in26, in25, in24, in23, in20);  in fdct8x32_1d_column_odd_store()
    206: ADD2(in26, in27, in24, in25, in22, in21);  in fdct8x32_1d_column_odd_store()
    220: ADD2(in28, in29, in31, in30, in16, in19);  in fdct8x32_1d_column_odd_store()
    239: ADD2(in29, in28, in30, in31, in17, in18);  in fdct8x32_1d_column_odd_store()
    347: ADD2(vec4, vec5, vec7, vec6, vec0, vec1);  in fdct8x32_1d_row_even_4x()
    364: ADD2(in0, in1, in2, in3, vec0, vec7);  in fdct8x32_1d_row_even_4x()
    384: ADD2(in3, in2, in0, in1, vec3, vec4);  in fdct8x32_1d_row_even_4x()
    [all …]
  idct16x16_msa.c
    388: ADD2(res0, out0, res1, out1, res0, res1);  in vpx_iadst16_1d_columns_addblk_msa()
    405: ADD2(res8, out8, res9, out9, res8, res9);  in vpx_iadst16_1d_columns_addblk_msa()
    420: ADD2(res4, out4, res5, out5, res4, res5);  in vpx_iadst16_1d_columns_addblk_msa()
    432: ADD2(res12, out12, res13, out13, res12, res13);  in vpx_iadst16_1d_columns_addblk_msa()
    445: ADD2(res6, out6, res7, out7, res6, res7);  in vpx_iadst16_1d_columns_addblk_msa()
    456: ADD2(res10, out10, res11, out11, res10, res11);  in vpx_iadst16_1d_columns_addblk_msa()
    469: ADD2(res2, out2, res3, out3, res2, res3);  in vpx_iadst16_1d_columns_addblk_msa()
    480: ADD2(res14, out14, res15, out15, res14, res15);  in vpx_iadst16_1d_columns_addblk_msa()
  fwd_txfm_msa.c
     20: ADD2(in0, in2, in4, in6, in0, in4);  in vpx_fdct8x8_1_msa()
    121: ADD2(stp34, stp25, stp33, stp22, in13, in10);  in fdct8x16_1d_column()
    264: ADD2(in0, in2, in4, in6, in0, in4);  in vpx_fdct16x16_1_msa()
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CallSiteSplitting/
  split-loop.ll
     35: ; CHECK-NEXT: [[ADD2:%.*]] = add i16 [[S]], 10
     62: ; CHECK-NEXT: [[ADD2:%.*]] = add i16 [[ADD]], 10
     68: ; CHECK-NEXT: ret i16 [[ADD2]]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/
  PR36280.ll
     13: ; CHECK-NEXT: [[ADD2:%.*]] = fadd float [[MUL2]], [[ADD1]]
     14: ; CHECK-NEXT: ret float [[ADD2]]
  return.ll
     24: ; CHECK-NEXT: [[ADD2:%.*]] = fadd double [[TMP3]], [[TMP4]]
     25: ; CHECK-NEXT: ret double [[ADD2]]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/GlobalISel/
  machine-cse-mid-pipeline.mir
    197: ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD0]], [[ADD1]]
    198: ; CHECK-NEXT: $w0 = COPY [[ADD2]](s32)
    231: ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD0]], [[ADD1]]
    232: ; CHECK-NEXT: $w0 = COPY [[ADD2]](s32)
    264: ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD0]], [[ADD1]]
    265: ; CHECK-NEXT: $w0 = COPY [[ADD2]](s32)
/external/libvpx/libvpx/vp8/encoder/mips/msa/
  denoising_msa.c
    111: ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1);  in vp8_denoiser_filter_msa()
    113: ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h);  in vp8_denoiser_filter_msa()
    159: ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1);  in vp8_denoiser_filter_msa()
    161: ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h);  in vp8_denoiser_filter_msa()
    224: ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3);  in vp8_denoiser_filter_msa()
    237: ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3);  in vp8_denoiser_filter_msa()
    258: ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3);  in vp8_denoiser_filter_msa()
    271: ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3);  in vp8_denoiser_filter_msa()
  temporal_filter_msa.c
     66: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_16size_msa()
    104: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_16size_msa()
    186: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_8size_msa()
    226: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_8size_msa()
  encodeopt_msa.c
    149: ADD2(err0, err_dup0, err1, err_dup1, err0, err1);  in vp8_mbuverror_msa()
    161: ADD2(err0, err_dup0, err1, err_dup1, err0, err1);  in vp8_mbuverror_msa()
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/
  mixed-fast-nonfast-fp.ll
     29: ; CHECK-NEXT: [[ADD2:%.*]] = fadd reassoc float [[MUL2]], [[MUL4]]
     30: ; CHECK-NEXT: [[ADD3:%.*]] = fadd fast float [[ADD1]], [[ADD2]]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
  add2.ll
    219: ; CHECK-NEXT: [[ADD2:%.*]] = mul nsw i16 [[X:%.*]], 3
    220: ; CHECK-NEXT: ret i16 [[ADD2]]
    229: ; CHECK-NEXT: [[ADD2:%.*]] = mul nsw i16 [[X:%.*]], 9
    230: ; CHECK-NEXT: ret i16 [[ADD2]]
    239: ; CHECK-NEXT: [[ADD2:%.*]] = mul nsw i16 [[X:%.*]], 9
    240: ; CHECK-NEXT: ret i16 [[ADD2]]
    294: ; CHECK-NEXT: [[ADD2:%.*]] = shl i16 [[X:%.*]], 15
    295: ; CHECK-NEXT: ret i16 [[ADD2]]
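The add2.ll checks above (test lines 219 through 295) verify that InstCombine collapses chains of additions of the same value into a single multiply, or a shift when the factor is a power of two. A hedged C analogue of that kind of input, illustrative only and not the test's own functions:

    #include <stdint.h>

    /* InstCombine is expected to canonicalize these add chains into one
     * multiply, matching the "mul nsw i16 %x, 3" / "mul nsw i16 %x, 9"
     * patterns in the CHECK lines above.                                 */
    int16_t add_chain_to_mul3(int16_t x) { return (int16_t)(x + x + x); }
    int16_t add_chain_to_mul9(int16_t x) { return (int16_t)((x * 8) + x); }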
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SimplifyCFG/
  multiple-phis.ll
     24: ; CHECK-NEXT: [[ADD2:%.*]] = add i32 [[DIV]], 1
     26: ; CHECK-NEXT: [[LOW_0_ADD2]] = select i1 [[CMP1]], i32 [[LOW_0]], i32 [[ADD2]]
/external/libaom/libaom/av1/encoder/mips/msa/
  temporal_filter_msa.c
     78: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_8size_msa()
    125: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_8size_msa()
    202: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_16size_msa()
    249: ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);  in temporal_filter_apply_16size_msa()
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/AMDGPU/
  reduction.ll
     22: ; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
     23: ; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
     62: ; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
     63: ; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
    124: ; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
    125: ; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
    186: ; GCN-NEXT: [[ADD2:%.*]] = fsub fast half [[ELT2]], [[ADD1]]
    187: ; GCN-NEXT: [[ADD3:%.*]] = fsub fast half [[ELT3]], [[ADD2]]
    220: ; VI-NEXT: [[ADD2:%.*]] = add i16 [[ELT2]], [[ADD1]]
    221: ; VI-NEXT: [[ADD3:%.*]] = add i16 [[ELT3]], [[ADD2]]
    [all …]
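The reduction.ll checks above show a serial chain of fast-math half operations (ADD1 through ADD3) in the VI and GCN output. A hedged C sketch of the four-element chained sum such a reduction test exercises, with float standing in for half and all names invented for illustration:

    /* add1 = v[1] + v[0]; add2 = v[2] + add1; add3 = v[3] + add2 */
    float sum4_chain(const float v[4]) {
      float add1 = v[1] + v[0];
      float add2 = v[2] + add1;
      float add3 = v[3] + add2;
      return add3;
    }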
/external/swiftshader/third_party/subzero/tests_lit/llvm2ice_tests/
  mips-legalization.ll
     76: ; MIPS32: addu [[ADD2:.*]],[[TMP_B]],[[TMP_E]]
     77: ; MIPS32: sw [[ADD2]],28(sp)
/external/libpng/mips/
  filter_msa_intrinsics.c
    280: #define ADD2(in0, in1, in2, in3, out0, out1) \  macro
    288: ADD2(in0, in1, in2, in3, out0, out1); \
    294: ADD2(in0, in1, in2, in3, out0, out1); \
    295: ADD2(in4, in5, in6, in7, out2, out3); \
    427: ADD2(src0, src4, src1, src5, src0, src1);  in png_read_filter_row_up_msa()
    438: ADD2(src0, src4, src1, src5, src0, src1);  in png_read_filter_row_up_msa()
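The filter_msa_intrinsics.c hit at line 280 shows ADD2 being defined as a macro; its body is truncated in the listing. Every ADD2 call site above takes four inputs and two outputs, which suggests a pairwise-add helper. A minimal sketch of that shape, assuming plain element-wise vector addition (not a verbatim copy of any one project's definition):

    /* Sketch only: two independent additions per invocation,
     * out0 = in0 + in1 and out1 = in2 + in3.                   */
    #define ADD2(in0, in1, in2, in3, out0, out1) \
      {                                          \
        (out0) = (in0) + (in1);                  \
        (out1) = (in2) + (in3);                  \
      }

Under that reading, a call such as ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h) in the temporal-filter code simply accumulates cnt0 into mod0_h and cnt1 into mod1_h.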
/external/llvm/test/CodeGen/AArch64/
  addsub.ll
     47: ; CHECK: add [[ADD2:x[0-9]+]], x[[LOAD32]], #12
     51: ; CHECK: str [[ADD2]], [x1]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
  addsub.ll
     47: ; CHECK: add [[ADD2:x[0-9]+]], x[[LOAD32]], #12
     51: ; CHECK: str [[ADD2]], [x1]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InterleavedAccess/X86/
  interleavedLoad.ll
     35: ; AVX2-NEXT: [[ADD2:%.*]] = add <32 x i8> [[TMP25]], [[ADD1]]
     36: ; AVX2-NEXT: ret <32 x i8> [[ADD2]]
     68: ; AVX2-NEXT: [[ADD2:%.*]] = add <16 x i8> [[TMP16]], [[ADD1]]
     69: ; AVX2-NEXT: ret <16 x i8> [[ADD2]]
     87: ; AVX2-NEXT: [[ADD2:%.*]] = add <8 x i8> [[V3]], [[ADD1]]
     88: ; AVX2-NEXT: ret <8 x i8> [[ADD2]]
    148: ; AVX2-NEXT: [[ADD2:%.*]] = add <64 x i8> [[TMP43]], [[ADD1]]
    149: ; AVX2-NEXT: ret <64 x i8> [[ADD2]]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
  reduction.ll
     40: ; GFX9-NEXT: v_pk_add_f16 [[ADD2:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}{{$}}
     41: ; GFX9-NEXT: v_pk_add_f16 [[ADD3:v[0-9]+]], [[ADD2]], [[ADD1]]{{$}}
     66: ; GFX9-NEXT: v_pk_add_u16 [[ADD2]], v{{[0-9]+}}, v{{[0-9]+}}{{$}}
     67: ; GFX9-NEXT: v_pk_add_u16 [[ADD3]], [[ADD2]], [[ADD1]]{{$}}
     96: ; GFX9-NEXT: v_pk_add_f16 [[ADD2]], v{{[0-9]+}}, v{{[0-9]+}}{{$}}
     97: ; GFX9-NEXT: v_pk_add_f16 [[ADD3]], [[ADD2]], [[ADD1]]{{$}}
  ds_read2_superreg.ll
     43: ; CI: v_add_f32_e32 v[[ADD2:[0-9]+]], v[[ADD0]], v[[ADD1]]
     44: ; CI: buffer_store_dword v[[ADD2]]
/external/webp/src/dsp/
  lossless_msa.c
     94: ADD2(t0, t2, t1, t3, t0, t1); \
    260: ADD2(src0, tmp0, src1, tmp1, dst0, dst1);  in AddGreenToBlueAndRed_MSA()
  filters_msa.c
    114: ADD2(a0, b0, a1, b1, a0, a1);  in PredictLineGradient()