/external/llvm/test/CodeGen/ARM/ |
D | vldm-liveness.ll | 4 ; s1 = VLDRS [r0, 1], Q0<imp-def> 5 ; s3 = VLDRS [r0, 2], Q0<imp-use,kill>, Q0<imp-def> 6 ; s0 = VLDRS [r0, 0], Q0<imp-use,kill>, Q0<imp-def> 7 ; s2 = VLDRS [r0, 4], Q0<imp-use,kill>, Q0<imp-def> 11 ; imp-use of Q0, which is undefined.
|
D | 2010-06-29-PartialRedefFastAlloc.ll | 10 ; %reg1028 gets allocated %Q0, and if %reg1030 is reloaded for the partial 11 ; redef, it cannot also get %Q0.
|
D | vcgt.ll | 165 ;CHECK: vmov.i32 [[Q0:q[0-9]+]], #0x1 167 ;CHECK: vand [[Q2:q[0-9]+]], [[Q1]], [[Q0]]
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64CallingConvention.td | 68 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 70 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 72 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 75 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 77 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 106 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 108 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 110 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 113 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 115 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>> [all …]
|
D | AArch64CallingConvention.h | 40 static const MCPhysReg QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,
|
D | AArch64AsmPrinter.cpp | 428 DestReg = AArch64::Q0 + (DestReg - AArch64::S0); in EmitFMov0() 431 DestReg = AArch64::Q0 + (DestReg - AArch64::D0); in EmitFMov0()
|
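The AArch64CallingConvention.td lists above assign the first eight floating-point/vector arguments to Q0–Q7, and the AsmPrinter match shows S0/D0/Q0 sharing one register index. A minimal C sketch of what those register lists mean at the source level, assuming an AAPCS64 target and arm_neon.h (the function name is illustrative, not from the tree):

    #include <arm_neon.h>

    /* Under AAPCS64 the first eight FP/vector arguments are passed in v0-v7
     * (Q0-Q7 when 128 bits wide); the ninth argument (i) spills to the stack. */
    float32x4_t nine_vector_args(float32x4_t a, float32x4_t b, float32x4_t c,
                                 float32x4_t d, float32x4_t e, float32x4_t f,
                                 float32x4_t g, float32x4_t h, float32x4_t i) {
        return vaddq_f32(a, i);   /* the vector result comes back in v0 (Q0) */
    }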
/external/libhevc/common/arm/ |
D | ihevc_sao_edge_offset_class0.s | 190 VCLT.U8 Q0,Q13,Q14 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp) 211 …VSUB.I8 Q10,Q0,Q15 @II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_g… 215 VCLT.U8 Q0,Q13,Q14 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp) 217 …VSUB.I8 Q11,Q0,Q15 @II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_… 221 …VMOVL.U8 Q0,D26 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u… 245 …VADDW.S8 Q0,Q0,D30 @II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[… 248 …VMAX.S16 Q0,Q0,Q2 @II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val… 251 …VMIN.U16 Q0,Q0,Q3 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u… 253 VMOVN.I16 D0,Q0 @II vmovn_s16(pi2_tmp_cur_row.val[0])
|
D | ihevc_sao_edge_offset_class1.s | 115 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2) 168 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 180 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up) 241 VADD.I8 Q11,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 310 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 320 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up) 360 VADD.I8 Q11,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up)
|
D | ihevc_sao_edge_offset_class1_chroma.s | 118 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2) 172 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 184 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up) 253 VADD.I8 Q11,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 327 VADD.I8 Q6,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up) 339 VADD.I8 Q11,Q0,Q8 @II edge_idx = vaddq_s8(const_2, sign_up) 390 VADD.I8 Q11,Q0,Q8 @edge_idx = vaddq_s8(const_2, sign_up)
|
D | ihevc_sao_edge_offset_class2.s | 190 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2) 303 VADD.I8 Q12,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up) 387 VADD.I8 Q11,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up) 401 VADD.I8 Q9,Q0,Q7 @III edge_idx = vaddq_s8(const_2, sign_up) 479 VADD.I8 Q9,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 620 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 751 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up)
|
D | ihevc_sao_edge_offset_class3.s | 202 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2) 321 VADD.I8 Q9,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up) 410 VADD.I8 Q13,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up) 430 VADD.I8 Q9,Q0,Q7 @III edge_idx = vaddq_s8(const_2, sign_up) 517 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 661 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 803 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up)
|
D | ihevc_sao_edge_offset_class3_chroma.s | 285 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2) 409 VADD.I8 Q9,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up) 513 VADD.I8 Q13,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up) 539 VADD.I8 Q9,Q0,Q7 @III edge_idx = vaddq_s8(const_2, sign_up) 643 VADD.I8 Q9,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 815 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 989 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up)
|
D | ihevc_sao_edge_offset_class2_chroma.s | 273 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2) 417 VADD.I8 Q9,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up) 510 VADD.I8 Q13,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up) 547 VADD.I8 Q9,Q0,Q7 @III edge_idx = vaddq_s8(const_2, sign_up) 641 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 792 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) 940 VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up)
|
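The ihevc_sao_edge_offset_* listings above all follow the same NEON pattern spelled out in their comments: a per-lane sign built from two unsigned comparisons, edge_idx = 2 + one sign + the other (sign_up/sign_down, or sign_left/sign_right for class 0), and an offset applied with 16-bit headroom before narrowing back. A minimal intrinsics sketch of those steps, assuming arm_neon.h; names are illustrative, not the library's API:

    #include <arm_neon.h>

    /* Per-lane sign of (cur - neighbour): +1, 0 or -1, built from the
     * VCGT/VCLT + VSUB idiom in the comments above.                    */
    static inline int8x16_t sao_sign(uint8x16_t cur, uint8x16_t neighbour) {
        uint8x16_t cmp_gt = vcgtq_u8(cur, neighbour);  /* 0xFF where cur > neighbour */
        uint8x16_t cmp_lt = vcltq_u8(cur, neighbour);  /* 0xFF where cur < neighbour */
        /* 0x00 - 0xFF wraps to +1, and 0xFF - 0x00 is -1 once reinterpreted as s8 */
        return vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt));
    }

    /* edge_idx = const_2 + sign_a + sign_b, mirroring the VADD.I8 edge_idx lines. */
    static inline int8x16_t sao_edge_idx(int8x16_t sign_a, int8x16_t sign_b) {
        return vaddq_s8(vaddq_s8(vdupq_n_s8(2), sign_a), sign_b);
    }

    /* Offset application with widen, clamp and narrow, mirroring the
     * VMOVL.U8 / VADDW.S8 / VMAX.S16 / VMIN.U16 / VMOVN.I16 sequence.  */
    static inline uint8x8_t sao_apply_offset(uint8x8_t cur, int8x8_t offset) {
        int16x8_t wide = vreinterpretq_s16_u16(vmovl_u8(cur));
        wide = vaddw_s8(wide, offset);
        wide = vmaxq_s16(wide, vdupq_n_s16(0));
        wide = vminq_s16(wide, vdupq_n_s16(255));
        return vmovn_u16(vreinterpretq_u16_s16(wide));
    }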
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td | 75 CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>, 82 CCIfType<[f32], CCAssignToStackWithShadow<4, 4, [Q0, Q1, Q2, Q3]>>, 83 CCIfType<[f64], CCAssignToStackWithShadow<8, 4, [Q0, Q1, Q2, Q3]>>, 84 CCIfType<[v2f64], CCAssignToStackWithShadow<16, 4, [Q0, Q1, Q2, Q3]>>, 94 CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>, 138 CCIfType<[f32], CCAssignToStackWithShadow<4, 4, [Q0, Q1, Q2, Q3]>>, 139 CCIfType<[f64], CCAssignToStackWithShadow<8, 8, [Q0, Q1, Q2, Q3]>>, 141 CCAssignToStackWithShadow<16, 16, [Q0, Q1, Q2, Q3]>>>, 142 CCIfType<[v2f64], CCAssignToStackWithShadow<16, 8, [Q0, Q1, Q2, Q3]>> 215 CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>, [all …]
|
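For 32-bit ARM, the ARMCallingConv.td entry above plays the same role: v2f64 (128-bit) arguments are assigned to Q0–Q3, with stack slots shadowing those registers once they run out. A small hedged C sketch under the hard-float AAPCS variant (illustrative name, not from the tree):

    #include <arm_neon.h>

    /* With the AAPCS VFP variant, the first four 128-bit vector arguments are
     * passed in Q0-Q3; a fifth (e) goes to the stack, shadowing Q0-Q3.       */
    float32x4_t five_vector_args(float32x4_t a, float32x4_t b, float32x4_t c,
                                 float32x4_t d, float32x4_t e) {
        return vmulq_f32(a, e);   /* returned in Q0 */
    }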
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
D | 2010-06-29-PartialRedefFastAlloc.ll | 10 ; %reg1028 gets allocated %Q0, and if %reg1030 is reloaded for the partial 11 ; redef, it cannot also get %Q0.
|
D | vcgt.ll | 165 ;CHECK: vmov.i32 [[Q0:q[0-9]+]], #0x1 167 ;CHECK: vand [[Q2:q[0-9]+]], [[Q1]], [[Q0]]
|
/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
D | ARMCallingConv.td | 66 CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>, 78 CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>, 166 CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>, 178 CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-return-vector.ll | 3 ; 2x64 vector should be returned in Q0.
|
/external/libavc/common/arm/ |
D | ih264_iquant_itrans_recon_a9.s | 147 vmull.s16 q0, d16, d20 @ Q0 = p[i] = (x[i] * trns_coeff[i]) where i = 0..3 152 vshl.s32 q0, q0, q15 @ Q0 = q[i] = (p[i] << (qP/6)) where i = 0..3 320 vmull.s16 q0, d16, d20 @ Q0 = p[i] = (x[i] * trns_coeff[i]) where i = 0..3 325 vshl.s32 q0, q0, q15 @ Q0 = q[i] = (p[i] << (qP/6)) where i = 0..3 508 vmull.s16 q0, d16, d20 @ Q0 = p[i] = (x[i] * trns_coeff[i]) where i = 0..3 515 vshl.s32 q0, q0, q15 @ Q0 = q[i] = (p[i] << (qP/6)) where i = 0..3 601 vswp d1, d8 @ Q0/Q1 = Row order x0/x1 625 vadd.s16 q0, q8, q2 @ Q0 = z0 697 vadd.s16 q0, q0, q11 @ Q0 = x0 717 vswp d1, d8 @ Q0/Q1 = Row order x0/x1 [all …]
|
D | ih264_ihadamard_scaling_a9.s | 141 vmul.s32 q0, q0, q9 @ Q0 = p[i] = (x[i] * trns_coeff[i]) where i = 0..3 146 vshl.s32 q0, q0, q10 @ Q0 = q[i] = (p[i] << (qP/6)) where i = 0..3
|
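The ih264_iquant_itrans_recon_a9.s and ih264_ihadamard_scaling_a9.s comments above both describe the same dequantisation step: p[i] = x[i] * trns_coeff[i] followed by q[i] = p[i] << (qP/6). A scalar C sketch of that step (names are illustrative, not the library's API):

    #include <stdint.h>

    /* Scalar equivalent of the vmull.s16 / vshl.s32 pair in the comments above:
     * p[i] = x[i] * trns_coeff[i];  q[i] = p[i] << (qP / 6)                   */
    static void dequant4(int32_t q_out[4], const int16_t x[4],
                         const int16_t trns_coeff[4], int qP) {
        for (int i = 0; i < 4; ++i) {
            int32_t p = (int32_t)x[i] * (int32_t)trns_coeff[i];  /* vmull.s16 */
            q_out[i] = p << (qP / 6);                            /* vshl.s32  */
        }
    }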
D | ih264_deblk_chroma_a9.s | 284 vld2.8 {d0, d1}, [r0], r1 @Q0=q0 319 vqsub.u8 q0, q0, q7 @Q0 = q0 - delta 321 vbif q0, q9, q4 @Q0 = (i_macro >= 0 ) ? (q0-delta) : (q0+delta) 426 @ Q0 - Q3(inputs), 923 vld2.8 {d0, d1}, [r0], r1 @Q0=q0 961 vqsub.u8 q0, q0, q7 @Q0 = q0 - delta 963 vbif q0, q9, q4 @Q0 = (i_macro >= 0 ) ? (q0-delta) : (q0+delta) 1080 @ Q0 - Q3(inputs),
|
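The ih264_deblk_chroma_a9.s comments above show the final step of the chroma deblocking filter: q0 is updated by ±delta depending on the sign of i_macro, realised with saturating adds/subs and a VBIF select. A minimal intrinsics sketch of that select, assuming arm_neon.h and a precomputed lane mask (illustrative names):

    #include <arm_neon.h>

    /* q0' = (i_macro >= 0) ? saturate(q0 - delta) : saturate(q0 + delta),
     * with the choice made lane-wise by a bit-select, like the VBIF above. */
    static inline uint8x16_t deblk_apply_delta(uint8x16_t q0, uint8x16_t delta,
                                               uint8x16_t i_macro_nonneg) {
        uint8x16_t minus = vqsubq_u8(q0, delta);   /* VQSUB.U8 */
        uint8x16_t plus  = vqaddq_u8(q0, delta);   /* VQADD.U8 */
        return vbslq_u8(i_macro_nonneg, minus, plus);
    }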
/external/llvm/test/TableGen/ |
D | ForeachLoop.td | 13 // CHECK: def Q0
|
/external/swiftshader/third_party/LLVM/lib/Target/ARM/MCTargetDesc/ |
D | ARMBaseInfo.h | 155 case R0: case S0: case D0: case Q0: return 0; in getARMRegisterNumbering()
|