/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | sink-down-more-instructions-regpressure-high.mir |
    521  ; CHECK: [[ADD4_:%[0-9]+]]:gprc = nsw ADD4 killed [[LWZU]], [[PHI2]]
    551  ; CHECK: [[ADD4_1:%[0-9]+]]:gprc = nsw ADD4 [[PHI6]], [[ADD4_]]
    553  ; CHECK: [[ADD4_2:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_1]], killed [[LWZ]]
    555  ; CHECK: [[ADD4_3:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_2]], killed [[LWZ1]]
    557  ; CHECK: [[ADD4_4:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_3]], killed [[LWZX]]
    559  ; CHECK: [[ADD4_5:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_4]], killed [[LWZX1]]
    561  ; CHECK: [[ADD4_6:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_5]], killed [[LWZX2]]
    563  ; CHECK: [[ADD4_7:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_6]], killed [[LWZX3]]
    565  ; CHECK: [[ADD4_8:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_7]], killed [[LWZX4]]
    567  ; CHECK: [[ADD4_9:%[0-9]+]]:gprc = nsw ADD4 killed [[ADD4_8]], killed [[LWZX5]]
    [all …]
|
D | ifcvt.mir |
    36  renamable $r5 = ADD4 killed renamable $r5, killed renamable $r6
    37  renamable $r6 = ADD4 killed renamable $r6, renamable $r6
|
D | sink-down-more-instructions-1.mir |
    341  ; CHECK: [[ADD4_:%[0-9]+]]:gprc = nsw ADD4 killed [[LWZX]], [[PHI2]]
    361  ; CHECK: [[ADD4_1:%[0-9]+]]:gprc = nsw ADD4 [[PHI3]], [[ADD4_]]
    398  ; CHECK: [[ADD4_2:%[0-9]+]]:gprc = nsw ADD4 [[LWZU]], [[PHI5]]
    399  ; CHECK: [[ADD4_3:%[0-9]+]]:gprc = nsw ADD4 [[PHI8]], [[ADD4_2]]
    427  ; CHECK: [[ADD4_4:%[0-9]+]]:gprc = nsw ADD4 [[LWZ]], [[ADD4_2]]
    428  ; CHECK: [[ADD4_5:%[0-9]+]]:gprc = nsw ADD4 [[PHI9]], [[ADD4_4]]
    492  %7:gprc = nsw ADD4 killed %77, %6
    512  %87:gprc = nsw ADD4 %10, %7
    538  %21:gprc = nsw ADD4 killed %54, %12
    558  %67:gprc = nsw ADD4 %25, %21
    [all …]
|
D | aix32-cc-abi-vaarg.ll |
    79   ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r6, killed renamable $r3
    80   ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
    155  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
    156  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5
    157  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6
    158  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r7
    159  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r8
    160  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r9
    161  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r10
    162  ; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r4, killed renamable $r3
    [all …]
|
D | aix64-cc-abi-vaarg.ll |
    74   ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r8, renamable $r3, implicit killed $x3
    76   ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4, implicit-def …
    152  ; 64BIT-DAG: renamable $r3 = nsw ADD4 renamable $r4, renamable $r3, implicit killed $x3, implic…
    153  ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
    154  ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6
    155  ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r7, implicit killed $x7
    156  ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r8, implicit killed $x8
    157  ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r9, implicit killed $x9
    158  ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r10, implicit killed $x10
    159  ; 64BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r11
    [all …]
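
Note: in PowerPC MIR, ADD4 is simply the 32-bit integer add on gprc registers (ADD8 is its 64-bit counterpart). The r3-accumulating chains in the two vaarg tests above are the kind of code a varargs integer sum lowers to; a minimal C sketch of such a source function follows (the function name and parameters are hypothetical, not taken from the tests):

    /* Hypothetical source producing an ADD4 chain like the one checked
     * above: each += of a fetched vararg lowers to one 32-bit add. */
    #include <stdarg.h>

    int sum_ints(int count, ...) {
      va_list ap;
      int total = 0;
      va_start(ap, count);
      for (int i = 0; i < count; ++i)
        total += va_arg(ap, int);
      va_end(ap);
      return total;
    }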
|
D | phi-eliminate.mir |
    161  ; CEHCK: %24:gprc = ADD4 killed %23, killed %21
    184  ; CHECK: %33:gprc = ADD4 killed %32, killed %30
    212  ; CHECK: %49:gprc = ADD4 killed %48, killed %46
    245  %24:gprc = ADD4 killed %23, killed %21
    265  %33:gprc = ADD4 killed %32, killed %30
    288  %49:gprc = ADD4 killed %48, killed %46
|
D | remove-implicit-use.mir |
    69  renamable $r5 = nsw ADD4 killed renamable $r5, renamable $r5, implicit $x5
    71  ; CHECK: ADD4
|
D | remove-self-copies.mir |
    122  renamable $r4 = ADD4 killed renamable $r5, killed renamable $r4, implicit $x4, implicit $x5
    124  renamable $r3 = ADD4 killed renamable $r4, killed renamable $r3
|
D | convert-rr-to-ri-instrs-R0-special-handling.mir |
    127  %4:gprc = ADD4 killed $r0, killed %2
    181  %4:gprc = ADD4 killed %3, killed $r0
    235  %4:gprc = ADD4 killed $r0, killed %2
    288  %4:gprc = ADD4 killed %2, killed $r0
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | fwd_txfm_msa.c |
    19   ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);  in vpx_fdct8x8_1_msa()
    50   ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);  in fdct8x16_1d_column()
    51   ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);  in fdct8x16_1d_column()
    159  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);  in fdct16x8_1d_row()
    160  ADD4(in4, 1, in5, 1, in6, 1, in7, 1, in4, in5, in6, in7);  in fdct16x8_1d_row()
    161  ADD4(in8, 1, in9, 1, in10, 1, in11, 1, in8, in9, in10, in11);  in fdct16x8_1d_row()
    162  ADD4(in12, 1, in13, 1, in14, 1, in15, 1, in12, in13, in14, in15);  in fdct16x8_1d_row()
    208  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);  in vpx_fdct4x4_msa()
    263  ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);  in vpx_fdct16x16_1_msa()
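
Note: in the libvpx, libaom, and vp8 MSA files throughout this listing, ADD4 is not an instruction but a convenience macro from the MSA support headers: eight inputs, four outputs, four independent element-wise additions. A plain-C sketch of the presumed definition (the real macro operates on MSA vector types; verify against macros_msa.h):

    /* Presumed shape of the ADD2/ADD4 helpers: out0 = in0 + in1,
     * out1 = in2 + in3, and likewise for the second ADD2 pair. */
    #define ADD2(in0, in1, in2, in3, out0, out1) \
      {                                          \
        (out0) = (in0) + (in1);                  \
        (out1) = (in2) + (in3);                  \
      }
    #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \
                 out0, out1, out2, out3)                  \
      {                                                   \
        ADD2(in0, in1, in2, in3, out0, out1);             \
        ADD2(in4, in5, in6, in7, out2, out3);             \
      }

This reading matches the calls above: ADD4(in0, in15, in1, in14, …, tmp0, tmp1, …) forms DCT butterfly sums, and ADD4(in0, 1, in1, 1, …) adds a rounding constant of 1 to four vectors at once.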
|
D | avg_msa.c |
    25   ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);  in vpx_avg_8x8_msa()
    419  ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,  in vpx_int_pro_row_msa()
    421  ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,  in vpx_int_pro_row_msa()
    423  ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,  in vpx_int_pro_row_msa()
    425  ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,  in vpx_int_pro_row_msa()
    443  ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,  in vpx_int_pro_row_msa()
    445  ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,  in vpx_int_pro_row_msa()
    447  ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,  in vpx_int_pro_row_msa()
    449  ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,  in vpx_int_pro_row_msa()
    461  ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,  in vpx_int_pro_row_msa()
    [all …]
|
D | idct32x32_msa.c |
    201  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3);  in idct32x8_row_odd_process_store()
    215  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct32x8_row_odd_process_store()
    229  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct32x8_row_odd_process_store()
    256  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);  in idct_butterfly_transpose_store()
    273  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);  in idct_butterfly_transpose_store()
    290  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);  in idct_butterfly_transpose_store()
    307  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);  in idct_butterfly_transpose_store()
    503  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);  in idct8x32_column_odd_process_store()
    514  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct8x32_column_odd_process_store()
    528  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct8x32_column_odd_process_store()
    [all …]
|
D | fwd_dct32x32_msa.c |
    76   ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);  in fdct8x32_1d_column_even_store()
    104  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_column_even_store()
    182  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);  in fdct8x32_1d_column_odd_store()
    185  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);  in fdct8x32_1d_column_odd_store()
    232  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);  in fdct8x32_1d_column_odd_store()
    316  ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r, tmp0_w,  in fdct8x32_1d_row_even_4x()
    319  ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l, vec0_r,  in fdct8x32_1d_row_even_4x()
    363  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_row_even_4x()
    407  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);  in fdct8x32_1d_row_even()
    435  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_row_even()
    [all …]
|
D | inv_txfm_msa.h |
    104  ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, res0_m, res1_m, \
    271  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \
    289  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \
    307  ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m, \
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | crash_reordering_undefs.ll |
    15  ; CHECK-NEXT: [[ADD4:%.*]] = select i1 [[CMP2]], i32 65536, i32 65537
    16  ; CHECK-NEXT: [[ADD5:%.*]] = add i32 [[ADD3]], [[ADD4]]
|
D | rgb_phi.ll |
    37  ; CHECK-NEXT: [[R_030:%.*]] = phi float [ [[TMP0]], [[ENTRY]] ], [ [[ADD4:%.*]], [[FOR_BODY_FOR_…
    39  ; CHECK-NEXT: [[ADD4]] = fadd float [[R_030]], [[MUL]]
    59  ; CHECK-NEXT: [[ADD16:%.*]] = fadd float [[ADD4]], [[ADD9]]
|
D | crash_cmpop.ll |
    25  ; SSE-NEXT: [[ADD4:%.*]] = fadd float [[MUL]], [[ADD3]]
    31  ; SSE-NEXT: [[CMP_I49:%.*]] = fcmp olt float [[ADD4]], 1.000000e+00
    32  ; SSE-NEXT: [[COND_I50:%.*]] = select i1 [[CMP_I49]], float [[ADD4]], float 1.000000e+00
|
/external/libaom/libaom/av1/encoder/mips/msa/ |
D | temporal_filter_msa.c |
    87   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()
    133  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()
    211  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()
    258  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()
|
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | temporal_filter_msa.c |
    74   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()
    113  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()
    195  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()
    235  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | idct_msa.c |
    108  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in idct4x4_addblk_msa()
    130  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);  in idct4x4_addconst_msa()
    204  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in dequant_idct4x4_addblk_msa()
    246  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in dequant_idct4x4_addblk_2x_msa()
    287  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);  in dequant_idct_addconst_2x_msa()
|
/external/llvm-project/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ |
D | ctpop.mir |
    86  ; MIPS32: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
    88  ; MIPS32: $v0 = COPY [[ADD4]](s32)
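
Note: the G_LSHR/G_ADD value names here suggest the standard shift-and-mask population-count expansion that legalization applies to G_CTPOP, with [[ADD4]] as one of the final partial-sum adds. A scalar C model of that classic trick (an assumption about the exact MIR; the test itself is the authority):

    /* Classic 32-bit popcount by summing bit fields in parallel. */
    static unsigned popcount32(unsigned v) {
      v = v - ((v >> 1) & 0x55555555u);                 /* 2-bit sums */
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u); /* 4-bit sums */
      v = (v + (v >> 4)) & 0x0F0F0F0Fu;                 /* 8-bit sums */
      return (v * 0x01010101u) >> 24; /* fold the byte sums into one */
    }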
|
D | mul.mir |
    304  ; MIPS32: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[MUL5]]
    305  ; MIPS32: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[MUL5]]
    309  ; MIPS32: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH1]]
    400  ; MIPS32: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[UMULH2]]
    401  ; MIPS32: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[UMULH2]]
    405  ; MIPS32: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ADD2]]
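
Note: the G_ADD followed by G_ICMP intpred(ult) against one addend, as checked twice above, is the standard carry-out detection used when a wide multiply is legalized into 32-bit limbs: an unsigned add wrapped exactly when its result is below either addend. In C terms (names are mine, not from the test):

    /* Carry-out detection mirroring the G_ADD / G_ICMP(ult) pairs. */
    unsigned add_carry(unsigned a, unsigned b, unsigned *carry) {
      unsigned sum = a + b;
      *carry = (sum < b); /* wrapped iff the sum is below an addend */
      return sum;
    }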
|
/external/llvm-project/llvm/test/Transforms/LoopVectorize/X86/ |
D | interleaving.ll |
    22  ; SSE-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], [[TMP1]]
    24  ; SSE-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX6]], align 4
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | legalize-sdiv.mir |
    42   ; GFX6: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C3]]
    43   ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[ADD4]], [[SELECT]]
    78   ; GFX8: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C3]]
    79   ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[ADD4]], [[SELECT]]
    114  ; GFX9: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C3]]
    115  ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[ADD4]], [[SELECT]]
    164  ; GFX6: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C3]]
    165  ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[ADD4]], [[SELECT]]
    232  ; GFX8: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C3]]
    233  ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[ADD4]], [[SELECT]]
    [all …]
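
Note: a G_ADD of a constant whose result feeds a G_SELECT, checked identically for all three GFX targets above, looks like the quotient-refinement step of AMDGPU's expanded 32-bit division: compute an approximate quotient, then conditionally adjust it. A hedged scalar model of one such refinement step (illustrative only; the MIR's exact constants and predicates may differ):

    /* One refinement step for an approximate unsigned quotient q that
     * may be low by one: bump it if the remainder is still >= d. */
    unsigned refine_quotient(unsigned n, unsigned d, unsigned q) {
      unsigned r = n - q * d;      /* remainder for the current guess */
      return (r >= d) ? q + 1 : q; /* quotient was one too small */
    }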
|
/external/llvm-project/llvm/test/Transforms/LoopVectorize/ARM/ |
D | tail-folding-prefer-flag.ll |
    56  ; PREDFLAG: %[[ADD4:.*]] = add i32 %index, 12
    60  …LM4:active.lane.mask.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %[[ADD4]], i32 %N)
|