/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | legalize-extract-vector-elt.mir |
    11  ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    12  ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    13  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C]](s32)
    14  ; CHECK: $vgpr0 = COPY [[EVEC]](s32)
    15  %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    16  %1:_(s32) = G_CONSTANT i32 0
    17  %2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
    27  ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    28  ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    29  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<3 x s32>), [[C]](s32)
    [all …]
|
D | regbankselect-insert-vector-elt.mir |
    12  ; CHECK: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    13  ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr5
    14  ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    15  …; CHECK: [[IVEC:%[0-9]+]]:sgpr(<4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[COPY1]](s32), [[C]](s3…
    16  ; CHECK: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[IVEC]](<4 x s32>)
    17  %0:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    18  %1:_(s32) = COPY $sgpr5
    19  %2:_(s32) = G_CONSTANT i32 0
    20  %3:_(<4 x s32>) = G_INSERT_VECTOR_ELT %0, %1, %2
    32  ; CHECK: [[COPY:%[0-9]+]]:vgpr(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    [all …]
|
D | legalize-merge-values.mir |
    9   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    10  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    11  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
    13  %0:_(s32) = G_CONSTANT i32 0
    14  %1:_(s32) = G_CONSTANT i32 1
    15  %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
    24  ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    25  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    26  ; CHECK: [[MV:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
    27  ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](<2 x s32>)
    [all …]
|
D | regbankselect-amdgcn-exp.mir |
    28  ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    29  ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    30  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    31  ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    32  ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
    33  ; CHECK: [[COPY3:%[0-9]+]]:sgpr(s32) = COPY $sgpr3
    36  ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    37  ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    38  ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    39  ; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[COPY3]](s32)
    [all …]
|
D | regbankselect-minnum.mir |
    13  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    14  ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum), [[COPY]](s32), [[COPY2]]…
    17  %0:_(s32) = COPY $sgpr0
    18  %1:_(s32) = COPY $sgpr1
    19  %2:_(s32) = G_INTRINSIC intrinsic(@llvm.minnum.f32), %0, %1
    29  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.minnum), [[COPY]](s32), [[COPY1]]…
    [all …]
|
D | regbankselect-maxnum.mir |
    13  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    14  ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum), [[COPY]](s32), [[COPY2]]…
    17  %0:_(s32) = COPY $sgpr0
    18  %1:_(s32) = COPY $sgpr1
    19  %2:_(s32) = G_INTRINSIC intrinsic(@llvm.maxnum.f32), %0, %1
    29  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.maxnum), [[COPY]](s32), [[COPY1]]…
    [all …]
|
D | regbankselect-amdgcn.cvt.pkrtz.mir |
    13  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    14  ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), [[COPY]](s32),…
    17  %0:_(s32) = COPY $sgpr0
    18  %1:_(s32) = COPY $sgpr1
    19  %2:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0, %1
    29  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    30  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    31  …; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), [[COPY]](s32),…
    [all …]
|
D | regbankselect-fadd.mir |
    13  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    14  ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  ; CHECK: [[FADD:%[0-9]+]]:vgpr(s32) = G_FADD [[COPY]], [[COPY2]]
    17  %0:_(s32) = COPY $sgpr0
    18  %1:_(s32) = COPY $sgpr1
    19  %2:_(s32) = G_FADD %0, %1
    30  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    31  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    32  ; CHECK: [[FADD:%[0-9]+]]:vgpr(s32) = G_FADD [[COPY]], [[COPY1]]
    [all …]
|
D | regbankselect-fmul.mir |
    13  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    14  ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    15  ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    16  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY2]]
    17  %0:_(s32) = COPY $sgpr0
    18  %1:_(s32) = COPY $sgpr1
    19  %2:_(s32) = G_FMUL %0, %1
    30  ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    31  ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    32  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY1]]
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/GlobalISel/ |
D | machine-cse-mid-pipeline.mir |
    9   ; CHECK: %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1
    10  ; CHECK-NEXT: %[[TWO:[0-9]+]]:_(s32) = G_ADD %[[ONE]], %[[ONE]]
    11  ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]]
    12  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
    15  %0:_(s32) = G_CONSTANT i32 1
    16  %1:_(s32) = G_ADD %0, %0
    17  %2:_(s32) = G_ADD %0, %0
    18  %3:_(s32) = G_ADD %1, %2
    19  $w0 = COPY %3(s32)
    29  ; CHECK: %[[ONE:[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
    [all …]
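    The machine-cse-mid-pipeline.mir checks above verify that machine CSE collapses the two identical G_ADD %0, %0 definitions into a single value feeding the final add. A minimal C sketch of the same redundancy, with illustrative names not taken from the test:

        /* Before CSE: the subexpression (x + x) is computed twice. */
        int sum_before(int x) {
            int a = x + x;
            int b = x + x;      /* identical to 'a'; CSE removes this computation */
            return a + b;
        }

        /* After CSE the compiler keeps one add and reuses it, effectively: */
        int sum_after(int x) {
            int a = x + x;
            return a + a;       /* mirrors G_ADD %[[TWO]], %[[TWO]] in the CHECK lines */
        }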
|
D | legalize-itofp.mir |
    34  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    35  ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s32)
    36  %0:_(s32) = COPY $w0
    37  %1:_(s32) = G_SITOFP %0
    47  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    48  ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[COPY]](s32)
    49  %0:_(s32) = COPY $w0
    50  %1:_(s32) = G_UITOFP %0
    61  ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s64)
    63  %1:_(s32) = G_SITOFP %0
    [all …]
|
D | legalize-phi.mir |
    69  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    70  ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    71  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    72  ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    73  ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]]
    74  ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
    79  ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]]
    80  ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
    84  ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]]
    85  ; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
    [all …]
|
D | localizer.mir |
    27  ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 1
    28  ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
    29  %0:gpr(s32) = G_CONSTANT 1
    30  %1:gpr(s32) = G_ADD %0, %0
    41  ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 1
    42  ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
    44  ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT 1
    45  ; CHECK: [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[ADD]]
    53  %0:gpr(s32) = G_CONSTANT 1
    54  %1:gpr(s32) = G_ADD %0, %0
    [all …]
|
D | legalize-fptoi.mir |
    34  ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    35  ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32)
    36  ; CHECK: $w0 = COPY [[FPTOSI]](s32)
    37  %0:_(s32) = G_IMPLICIT_DEF
    38  %1:_(s32) = G_FPTOSI %0
    48  ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    49  ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[DEF]](s32)
    50  ; CHECK: $w0 = COPY [[FPTOUI]](s32)
    51  %0:_(s32) = G_IMPLICIT_DEF
    52  %1:_(s32) = G_FPTOUI %0
    [all …]
|
D | legalize-ext.mir |
    40  ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    41  ; CHECK: $w0 = COPY [[TRUNC]](s32)
    42  ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    43  ; CHECK: $w0 = COPY [[TRUNC1]](s32)
    44  ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    45  ; CHECK: $w0 = COPY [[TRUNC2]](s32)
    46  ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    47  ; CHECK: $w0 = COPY [[TRUNC3]](s32)
    61  ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    62  ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    [all …]
|
D | regbankselect-default.mir |
    85   ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
    86   ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]]
    87   %0(s32) = COPY $w0
    88   %1(s32) = G_ADD %0, %0
    101  ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
    102  ; CHECK: [[ADD:%[0-9]+]]:fpr(<4 x s32>) = G_ADD [[COPY]], [[COPY]]
    103  %0(<4 x s32>) = COPY $q0
    104  %1(<4 x s32>) = G_ADD %0, %0
    117  ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
    118  ; CHECK: [[SUB:%[0-9]+]]:gpr(s32) = G_SUB [[COPY]], [[COPY]]
    [all …]
|
D | legalize-shift.mir |
    29  ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    30  ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    31  ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]]
    32  ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
    33  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    34  ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    35  ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C1]]
    36  ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]]
    37  ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
    38  ; CHECK: $w0 = COPY [[COPY2]](s32)
    [all …]
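    The legalize-shift.mir checks above show an 8-bit arithmetic shift being legalized to 32-bit operations: the value is sign-extended in place with a shift-left/arithmetic-shift-right by 24, and the shift amount is zero-extended by masking with 255. A hedged C sketch of that widening trick, with illustrative names:

        #include <stdint.h>

        /* 8-bit arithmetic shift right expressed with 32-bit operations
         * (well-defined here only for shift amounts below 32). */
        int32_t ashr_i8_via_i32(int32_t x, int32_t amt) {
            int32_t sext = (int32_t)((uint32_t)x << 24) >> 24; /* G_SHL/G_ASHR by 24: sign-extend the low 8 bits */
            int32_t masked = amt & 255;                        /* G_AND with 255: keep the low 8 bits of the amount */
            return sext >> masked;                             /* the legalized G_ASHR */
        }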
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/GlobalISel/ |
D | arm-legalize-fp.mir |
    96   ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
    97   ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
    98   %0(s32) = COPY $r0
    99   %1(s32) = COPY $r1
    108  ; SOFT: [[R:%[0-9]+]]:_(s32) = COPY $r0
    109  ; HARD: [[R:%[0-9]+]]:_(s32) = COPY $s0
    112  %2(s32) = G_FREM %0, %1
    114  $r0 = COPY %2(s32)
    145  ; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY $r0
    146  ; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY $r1
    [all …]
|
D | arm-legalize-divmod.mir |
    40  ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
    41  ; CHECK-DAG: [[Y:%[0-9]+]]:_(s32) = COPY $r1
    42  %0(s32) = COPY $r0
    43  %1(s32) = COPY $r1
    44  ; HWDIV: [[R:%[0-9]+]]:_(s32) = G_SDIV [[X]], [[Y]]
    50  ; SOFT-AEABI: [[R:%[0-9]+]]:_(s32) = COPY $r0
    52  ; SOFT-DEFAULT: [[R:%[0-9]+]]:_(s32) = COPY $r0
    55  %2(s32) = G_SDIV %0, %1
    57  $r0 = COPY %2(s32)
    76  ; CHECK-DAG: [[X:%[0-9]+]]:_(s32) = COPY $r0
    [all …]
|
D | arm-instruction-select-combos.mir |
    68   %0(s32) = COPY $r0
    69   %1(s32) = COPY $r1
    70   %2(s32) = COPY $r2
    75   %3(s32) = G_MUL %0, %1
    76   %4(s32) = G_ADD %3, %2
    79   $r0 = COPY %4(s32)
    102  %0(s32) = COPY $r0
    103  %1(s32) = COPY $r1
    104  %2(s32) = COPY $r2
    109  %3(s32) = G_MUL %0, %1
    [all …]
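    In arm-instruction-select-combos.mir the G_MUL result feeds directly into a G_ADD, the dataflow the ARM instruction selector can fold into a single multiply-accumulate (MLA). A small C equivalent of that shape, with illustrative names:

        #include <stdint.h>

        /* a*b + c: the multiply feeds the add, which the ARM backend
         * may select as one MLA instruction. */
        int32_t mul_then_add(int32_t a, int32_t b, int32_t c) {
            int32_t prod = a * b;   /* %3(s32) = G_MUL %0, %1 */
            return prod + c;        /* %4(s32) = G_ADD %3, %2 */
        }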
|
D | arm-param-lowering.ll |
    10  ; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY $r1
    29  ; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY $r1
    36  ; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    37  ; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
    38  ; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
    40  ; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    41  ; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
    57  ; CHECK-DAG: [[R0VREG:%[0-9]+]]:_(s32) = COPY $r0
    59  ; CHECK-DAG: [[R1VREG:%[0-9]+]]:_(s32) = COPY $r1
    61  ; CHECK-DAG: [[R2VREG:%[0-9]+]]:_(s32) = COPY $r2
    [all …]
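    The arm-param-lowering.ll checks above show values being stored through G_GEP pointers at byte offsets 0 and 4 from the stack pointer, which is how outgoing data that does not fit in r0-r3 reaches the callee. Under AAPCS the first four 32-bit integer arguments travel in r0-r3 and the remainder on the stack; a hypothetical C call that produces that split:

        /* 'e' and 'f' do not fit in r0-r3, so the caller stores them at
         * byte offsets 0 and 4 of the outgoing-argument area (AAPCS). */
        int take_six(int a, int b, int c, int d, int e, int f);

        int caller(void) {
            return take_six(1, 2, 3, 4, 5, 6);
        }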
|
D | arm-irtranslator.ll |
    15  ; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
    17  ; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
    20  ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_SEXT [[SUM]]
    21  ; CHECK: $r0 = COPY [[EXT]](s32)
    31  ; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
    33  ; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
    36  ; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
    37  ; CHECK: $r0 = COPY [[SUM_EXT]](s32)
    47  ; CHECK-DAG: [[VREGR0:%[0-9]+]]:_(s32) = COPY $r0
    49  ; CHECK-DAG: [[VREGR1:%[0-9]+]]:_(s32) = COPY $r1
    [all …]
|
/external/libhevc/common/arm/ |
D | ihevc_itrans_recon_8x8.s |
    248  vadd.s32 q5,q10,q11     @// c0 = y0 * cos4 + y4 * cos4(part of a0 and a1)
    249  vsub.s32 q10,q10,q11    @// c1 = y0 * cos4 - y4 * cos4(part of a0 and a1)
    256  vadd.s32 q7,q5,q3       @// a0 = c0 + d0(part of r0,r7)
    257  vsub.s32 q5,q5,q3       @// a3 = c0 - d0(part of r3,r4)
    258  vsub.s32 q11,q10,q9     @// a2 = c1 - d1(part of r2,r5)
    259  vadd.s32 q9,q10,q9      @// a1 = c1 + d1(part of r1,r6)
    261  vadd.s32 q10,q7,q12     @// a0 + b0(part of r0)
    262  vsub.s32 q3,q7,q12      @// a0 - b0(part of r7)
    264  vadd.s32 q12,q11,q14    @// a2 + b2(part of r2)
    265  vsub.s32 q11,q11,q14    @// a2 - b2(part of r5)
    [all …]
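    The ihevc_itrans_recon_8x8.s lines above are the even/odd butterfly of the 8x8 inverse transform: a0..a3 are built from c0/c1 plus or minus d0/d1, then combined with the odd-part terms b0..b3 to form output rows. A scalar C sketch of one lane of that butterfly, using the names from the assembly comments (an illustration, not the library's reference code):

        #include <stdint.h>

        /* One lane of the butterfly; the a1/a3 terms combine with b1/b3 the same way. */
        static void idct8_butterfly(int32_t c0, int32_t c1, int32_t d0, int32_t d1,
                                    int32_t b0, int32_t b2, int32_t r[8]) {
            int32_t a0 = c0 + d0;   /* vadd.s32: part of r0, r7 */
            int32_t a3 = c0 - d0;   /* vsub.s32: part of r3, r4 */
            int32_t a2 = c1 - d1;   /* part of r2, r5 */
            int32_t a1 = c1 + d1;   /* part of r1, r6 */

            r[0] = a0 + b0;         /* a0 + b0 -> row 0 */
            r[7] = a0 - b0;         /* a0 - b0 -> row 7 */
            r[2] = a2 + b2;         /* a2 + b2 -> row 2 */
            r[5] = a2 - b2;         /* a2 - b2 -> row 5 */
            (void)a1; (void)a3;     /* their b1/b3 counterparts fall outside the matched lines */
        }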
|
/external/boringssl/src/crypto/curve25519/asm/ |
D | x25519-asm-arm.S |
    339  vmull.s32 q12,d2,d2
    340  vmlal.s32 q12,d11,d1
    341  vmlal.s32 q12,d12,d0
    342  vmlal.s32 q12,d13,d23
    343  vmlal.s32 q12,d16,d22
    344  vmlal.s32 q12,d7,d21
    345  vmull.s32 q10,d2,d11
    346  vmlal.s32 q10,d4,d1
    347  vmlal.s32 q10,d13,d0
    348  vmlal.s32 q10,d6,d23
    [all …]
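    The x25519-asm-arm.S sequence above is the widening multiply-accumulate core of the field multiplication: vmull.s32 starts a 64-bit accumulator from a 32x32-bit product, and each vmlal.s32 folds another widened product into it. A scalar C sketch of the per-lane arithmetic (limb names are illustrative and do not reflect the actual curve25519 limb schedule):

        #include <stdint.h>

        /* One 64-bit accumulator built the way the q12 column is. */
        int64_t limb_column(const int32_t *a, const int32_t *b) {
            int64_t acc = (int64_t)a[0] * b[0];   /* vmull.s32: widening multiply */
            acc += (int64_t)a[1] * b[1];          /* vmlal.s32: multiply-accumulate long */
            acc += (int64_t)a[2] * b[2];          /* ... repeated for the remaining limb pairs */
            return acc;
        }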
|
/external/libmpeg2/common/arm/ |
D | impeg2_idct.s |
    221  vdup.s32 q0, r4
    233  vraddhn.s32 d12, q0, q4
    234  vraddhn.s32 d13, q0, q5
    243  vraddhn.s32 d12, q0, q4
    244  vraddhn.s32 d13, q0, q5
    253  vraddhn.s32 d12, q0, q4
    254  vraddhn.s32 d13, q0, q5
    263  vraddhn.s32 d12, q0, q4
    264  vraddhn.s32 d13, q0, q5
    273  vraddhn.s32 d12, q0, q4
    [all …]
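    In impeg2_idct.s, vdup.s32 broadcasts the scalar in r4 to every lane of q0 (likely a bias term, given it then feeds the narrowing adds), and vraddhn.s32 adds it to each 32-bit lane and keeps the rounded high 16 bits. A scalar C sketch of one vraddhn.s32 lane, assuming the standard definition of the instruction:

        #include <stdint.h>

        /* vraddhn.s32: add two 32-bit lanes, round, return the high half as s16. */
        static int16_t raddhn_s32(int32_t a, int32_t b) {
            int64_t sum = (int64_t)a + (int64_t)b + (1 << 15);  /* rounding bias of half an LSB of the kept half */
            return (int16_t)(sum >> 16);                        /* bits [31:16] of the rounded sum */
        }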
|