/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AMDGPU/
  vop3-gfx9.s
      34: v_pack_b32_f16 v5, v1, v2 op_sel:[1,0,0]
      37: v_pack_b32_f16 v5, v1, v2 op_sel:[0,1,0]
      40: v_pack_b32_f16 v5, v1, v2 op_sel:[0,0,1]
      63: v_max3_f16 v5, v1, v2, v3 op_sel:[0,0,0,0]
      66: v_max3_f16 v5, v1, v2, v3 op_sel:[1,0,0,0]
      69: v_max3_f16 v5, v1, v2, v3 op_sel:[0,1,0,0]
      72: v_max3_f16 v5, v1, v2, v3 op_sel:[0,0,1,0]
      75: v_max3_f16 v5, v1, v2, v3 op_sel:[0,0,0,1]
      78: v_max3_f16 v5, v1, v2, v3 op_sel:[1,1,1,1]
      85: v_max3_i16 v5, v1, v2, v3 op_sel:[0,0,0,0]
      [all …]
  gfx9_asm_all.s
      523: ds_add_rtn_u32 v5, v1, v2 offset:65535
      529: ds_add_rtn_u32 v5, v255, v2 offset:65535
      532: ds_add_rtn_u32 v5, v1, v255 offset:65535
      535: ds_add_rtn_u32 v5, v1, v2
      538: ds_add_rtn_u32 v5, v1, v2 offset:0
      541: ds_add_rtn_u32 v5, v1, v2 offset:4
      544: ds_add_rtn_u32 v5, v1, v2 offset:65535 gds
      547: ds_sub_rtn_u32 v5, v1, v2 offset:65535
      553: ds_sub_rtn_u32 v5, v255, v2 offset:65535
      556: ds_sub_rtn_u32 v5, v1, v255 offset:65535
      [all …]
  gfx8_asm_all.s
      522: ds_add_rtn_u32 v5, v1, v2 offset:65535
      528: ds_add_rtn_u32 v5, v255, v2 offset:65535
      531: ds_add_rtn_u32 v5, v1, v255 offset:65535
      534: ds_add_rtn_u32 v5, v1, v2
      537: ds_add_rtn_u32 v5, v1, v2 offset:0
      540: ds_add_rtn_u32 v5, v1, v2 offset:4
      543: ds_add_rtn_u32 v5, v1, v2 offset:65535 gds
      546: ds_sub_rtn_u32 v5, v1, v2 offset:65535
      552: ds_sub_rtn_u32 v5, v255, v2 offset:65535
      555: ds_sub_rtn_u32 v5, v1, v255 offset:65535
      [all …]
  vop3.s
      197: v_add_f32_e64 v1, v3, v5
      204: v_cndmask_b32 v1, v3, v5, s[4:5]
      208: v_cndmask_b32_e64 v1, v3, v5, s[4:5]
      212: v_cndmask_b32_e64 v1, v3, v5, vcc
      384: v_mad_f32 v9, 0.5, v5, -v8
      443: v_fma_f16_e64 v5, v1, v2, v3
      446: v_fma_f16 v5, v1, v2, 0.5
      449: v_fma_f16 v5, -v1, -v2, -v3
      452: v_fma_f16 v5, |v1|, |v2|, |v3|
      455: v_fma_f16 v5, v1, v2, v3 clamp
      [all …]
  gfx7_asm_all.s
      582: ds_add_rtn_u32 v5, v1, v2 offset:65535
      588: ds_add_rtn_u32 v5, v255, v2 offset:65535
      591: ds_add_rtn_u32 v5, v1, v255 offset:65535
      594: ds_add_rtn_u32 v5, v1, v2
      597: ds_add_rtn_u32 v5, v1, v2 offset:0
      600: ds_add_rtn_u32 v5, v1, v2 offset:4
      603: ds_add_rtn_u32 v5, v1, v2 offset:65535 gds
      606: ds_sub_rtn_u32 v5, v1, v2 offset:65535
      612: ds_sub_rtn_u32 v5, v255, v2 offset:65535
      615: ds_sub_rtn_u32 v5, v1, v255 offset:65535
      [all …]
  vop3-modifiers.s
      129: v_add_f32 v5, -1, v2
      132: v_add_f32 v5, -16, v2
      135: v_add_f32 v5, 0x3e22f983, v2
      139: v_add_f32 v5, abs(0x3e22f983), v2
      143: v_add_f32 v5, neg(0xbe22f983), v2
      147: v_add_f32 v5, neg(0x3e22f983), v2
      291: v_cvt_f32_i32_e64 v5, s1 clamp
      294: v_cvt_f32_i32_e64 v5, s1 mul:2
      297: v_cvt_f32_i32_e64 v5, s1 mul:4
      300: v_cvt_f32_i32_e64 v5, s1 div:2
      [all …]
  dl-insts.s
      8: v_fmac_f32 v5, v1, v2
      12: v_fmac_f32 v5, v255, v2
      14: v_fmac_f32 v5, s1, v2
      16: v_fmac_f32 v5, s101, v2
      18: v_fmac_f32 v5, flat_scratch_lo, v2
      20: v_fmac_f32 v5, flat_scratch_hi, v2
      22: v_fmac_f32 v5, vcc_lo, v2
      24: v_fmac_f32 v5, vcc_hi, v2
      26: v_fmac_f32 v5, m0, v2
      28: v_fmac_f32 v5, exec_lo, v2
      [all …]
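Every hit in dl-insts.s above is v_fmac_f32, the multiply-accumulate form whose destination VGPR also serves as the accumulator. As a rough scalar orientation only (assuming the usual FMAC reading, v5 = src0 * src1 + v5 with a single rounding, which these hits do not spell out themselves), the operation maps to std::fma:

```cpp
#include <cmath>

// Scalar sketch of v_fmac_f32 vDst, src0, src1, under the assumption that the
// destination doubles as the accumulator and the multiply-add is fused.
inline float fmac_f32(float dst, float src0, float src1) {
  return std::fma(src0, src1, dst);  // dst = src0 * src1 + dst, one rounding
}
```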
  flat.s
      55: flat_atomic_add v1, v[3:4], v5 offset:0 glc slc
      60: flat_atomic_add v[3:4], v5 slc
      135: flat_atomic_swap v[3:4], v5
      140: flat_atomic_swap v1, v[3:4], v5 glc
      155: flat_atomic_add v[3:4], v5
      160: flat_atomic_add v1, v[3:4], v5 glc
      165: flat_atomic_sub v[3:4], v5
      170: flat_atomic_sub v1, v[3:4], v5 glc
      175: flat_atomic_smin v[3:4], v5
      180: flat_atomic_smin v1, v[3:4], v5 glc
      [all …]
  flat-global.s
      135: global_atomic_swap v[3:4], v5, off
      143: global_atomic_add v[3:4], v5, off
      147: global_atomic_sub v[3:4], v5, off
      151: global_atomic_smin v[3:4], v5, off
      155: global_atomic_umin v[3:4], v5, off
      159: global_atomic_smax v[3:4], v5, off
      163: global_atomic_umax v[3:4], v5, off
      167: global_atomic_and v[3:4], v5, off
      171: global_atomic_or v[3:4], v5, off
      175: global_atomic_xor v[3:4], v5, off
      [all …]
  vop1-gfx9.s
      15: v_cvt_norm_i16_f16 v5, v1
      19: v_cvt_norm_i16_f16 v5, -4.0
      23: v_cvt_norm_i16_f16 v5, 0xfe0b
      27: v_cvt_norm_u16_f16 v5, s101
      35: v_sat_pk_u8_i16 v5, -1
      39: v_sat_pk_u8_i16 v5, 0x3f717273
      43: v_screen_partition_4se_b32 v5, v255
      47: v_screen_partition_4se_b32 v5, s101
      51: v_screen_partition_4se_b32 v5, -1
      55: v_screen_partition_4se_b32 v5, 0x3f717273
/external/llvm/test/MC/AMDGPU/
  flat.s
      100: flat_atomic_add v1 v[3:4], v5 glc slc
      105: flat_atomic_add v1 v[3:4], v5 glc tfe
      110: flat_atomic_add v1 v[3:4], v5 glc slc tfe
      115: flat_atomic_add v[3:4], v5 slc
      120: flat_atomic_add v[3:4], v5 slc tfe
      125: flat_atomic_add v[3:4], v5 tfe
      200: flat_atomic_swap v[3:4], v5
      205: flat_atomic_swap v1, v[3:4], v5 glc
      220: flat_atomic_add v[3:4], v5
      225: flat_atomic_add v1, v[3:4], v5 glc
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/
  arm64-simd-ldst.s
      11: ld1.8b {v4, v5, v6}, [x3]
      89: st1.2d {v5}, [x1]
      102: ; CHECK: ld1.8b { v4, v5, v6 }, [x3] ; encoding: [0x64,0x60,0x40,0x0c]
      181: ; CHECK: st1.2d { v5 }, [x1] ; encoding: [0x25,0x7c,0x00,0x4c]
      187: ld2.8b {v4, v5}, [x19]
      188: ld2.16b {v4, v5}, [x19]
      189: ld2.4h {v4, v5}, [x19]
      190: ld2.8h {v4, v5}, [x19]
      191: ld2.2s {v4, v5}, [x19]
      192: ld2.4s {v4, v5}, [x19]
      [all …]
/external/llvm/test/MC/AArch64/
  arm64-simd-ldst.s
      11: ld1.8b {v4, v5, v6}, [x3]
      89: st1.2d {v5}, [x1]
      102: ; CHECK: ld1.8b { v4, v5, v6 }, [x3] ; encoding: [0x64,0x60,0x40,0x0c]
      181: ; CHECK: st1.2d { v5 }, [x1] ; encoding: [0x25,0x7c,0x00,0x4c]
      187: ld2.8b {v4, v5}, [x19]
      188: ld2.16b {v4, v5}, [x19]
      189: ld2.4h {v4, v5}, [x19]
      190: ld2.8h {v4, v5}, [x19]
      191: ld2.2s {v4, v5}, [x19]
      192: ld2.4s {v4, v5}, [x19]
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/Disassembler/AMDGPU/
  vop3_gfx9.txt
      3: # GFX9: v_fma_f16 v5, v1, v2, v3 ; encoding: [0x05,0x00,0x06,0xd2,0x01,0x05,0x0e,0x04]
      6: # GFX9: v_fma_f16 v5, -v1, v2, v3 ; encoding: [0x05,0x00,0x06,0xd2,0x01,0x05,0x0e,0x24]
      9: # GFX9: v_fma_f16 v5, v1, |v2|, v3 ; encoding: [0x05,0x02,0x06,0xd2,0x01,0x05,0x0e,0x04]
      12: # GFX9: v_fma_f16 v5, v1, v2, v3 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x06,0xd2,0x01,0x05,0x0e,0…
      15: # GFX9: v_fma_f16 v5, v1, v2, v3 op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x06,0xd2,0x01,0x05,0x0e,0…
      18: # GFX9: v_fma_f16 v5, v1, v2, v3 clamp ; encoding: [0x05,0x80,0x06,0xd2,0x01,0x05,0x0e,0x04]
      21: # GFX9: v_fma_legacy_f16 v5, v1, v2, v3 ; encoding: [0x05,0x00,0xee,0xd1,0x01,0x05,0x0e,0x04]
      24: # GFX9: v_fma_legacy_f16 v5, v1, v2, -v3 ; encoding: [0x05,0x00,0xee,0xd1,0x01,0x05,0x0e,0x84]
      27: # GFX9: v_fma_legacy_f16 v5, |v1|, v2, v3 ; encoding: [0x05,0x01,0xee,0xd1,0x01,0x05,0x0e,0x04]
      30: # GFX9: v_fma_legacy_f16 v5, v1, v2, v3 clamp ; encoding: [0x05,0x80,0xee,0xd1,0x01,0x05,0x0e,0x…
      [all …]
/external/libhevc/common/arm64/
  ihevc_intra_pred_luma_planar.s
      146: dup v5.8b,w8 //row + 1
      148: …mov v7.8b, v5.8b //mov #1 to d7 to used for inc for row+1 and dec for nt-1-…
      184: umlal v27.8h, v5.8b, v0.8b //(1)(row+1) * src[nt-1]
      196: add v5.8b, v5.8b , v7.8b //(1)
      201: umlal v30.8h, v5.8b, v0.8b //(2)
      211: add v5.8b, v5.8b , v7.8b //(2)
      215: umlal v28.8h, v5.8b, v0.8b //(3)
      228: add v5.8b, v5.8b , v7.8b //(3)
      232: umlal v25.8h, v5.8b, v0.8b //(4)
      245: add v5.8b, v5.8b , v7.8b //(4)
      [all …]
  ihevc_sao_edge_offset_class1_chroma.s
      159: cmhi v5.16b, v3.16b , v28.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
      164: … SUB v16.16b, v19.16b , v5.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
      175: cmhi v5.16b, v3.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
      183: …SUB v20.16b, v19.16b , v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
      186: ADD v5.16b, v0.16b , v16.16b //edge_idx = vaddq_s8(const_2, sign_up)
      189: ADD v5.16b, v5.16b , v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
      194: TBL v5.16b, {v6.16b},v5.16b //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
      201: mov v17.d[0], v5.d[1]
      202: UZP1 v27.8b, v5.8b, v17.8b
      203: UZP2 v17.8b, v5.8b, v17.8b
      [all …]
  ihevc_sao_edge_offset_class1.s
      135: cmhi v5.16b, v3.16b , v1.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
      140: … SUB v16.16b, v17.16b , v5.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
      148: cmhi v5.16b, v3.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
      154: …SUB v20.16b, v17.16b , v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
      157: ADD v5.16b, v0.16b , v16.16b //edge_idx = vaddq_s8(const_2, sign_up)
      160: ADD v5.16b, v5.16b , v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
      164: TBL v5.16b, {v6.16b},v5.16b //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
      173: TBL v5.16b, {v7.16b},v5.16b //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
      179: …SADDW v20.8h, v20.8h , v5.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0]…
      190: …SADDW2 v1.8h, v1.8h , v5.16b //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1]…
      [all …]
/external/clang/test/Sema/
  vector-assign.c
      13: v4ss v5; in test1() local
      18: …v1 = v5; // expected-warning {{incompatible vector types assigning to 'v2s' (vector of 2 'int' val… in test1()
      23: …v2 = v5; // expected-warning {{incompatible vector types assigning to 'v2u' (vector of 2 'unsigned… in test1()
      28: …v3 = v5; // expected-error {{assigning to 'v1s' (vector of 1 'int' value) from incompatible type '… in test1()
      33: …v4 = v5; // expected-warning {{incompatible vector types assigning to 'v2f' (vector of 2 'float' v… in test1()
      35: …v5 = v1; // expected-warning {{incompatible vector types assigning to 'v4ss' (vector of 4 'short' … in test1()
      36: …v5 = v2; // expected-warning {{incompatible vector types assigning to 'v4ss' (vector of 4 'short' … in test1()
      37: …v5 = v3; // expected-error {{assigning to 'v4ss' (vector of 4 'short' values) from incompatible ty… in test1()
      38: …v5 = v4; // expected-warning {{incompatible vector types assigning to 'v4ss' (vector of 4 'short' … in test1()
/external/libavc/common/armv8/
  ih264_inter_pred_filters_luma_horz_av8.s
      133: ld1 {v5.8b, v6.8b, v7.8b}, [x0], x2 //// Load row1
      136: ext v28.8b, v5.8b , v6.8b, #5 ////extract a[5] (column1,row1)
      139: uaddl v14.8h, v28.8b, v5.8b //// a0 + a5 (column1,row1)
      144: ext v28.8b, v5.8b , v6.8b, #2 ////extract a[2] (column1,row1)
      152: ext v28.8b, v5.8b , v6.8b, #3 ////extract a[3] (column1,row1)
      160: ext v28.8b, v5.8b , v6.8b, #1 ////extract a[1] (column1,row1)
      168: ext v28.8b, v5.8b , v6.8b, #4 ////extract a[4] (column1,row1)
      176: ld1 {v5.8b, v6.8b, v7.8b}, [x0], x2 //// Load row3
      187: ext v28.8b, v5.8b , v6.8b, #5 ////extract a[5] (column1,row3)
      192: uaddl v14.8h, v28.8b, v5.8b //// a0 + a5 (column1,row3)
      [all …]
  ih264_inter_pred_luma_horz_qpel_av8.s
      141: ld1 {v5.8b, v6.8b, v7.8b}, [x0], x2 //// Load row1
      144: ext v28.8b, v5.8b , v6.8b , #5
      147: uaddl v14.8h, v28.8b, v5.8b //// a0 + a5 (column1,row1)
      152: ext v28.8b, v5.8b , v6.8b , #2
      160: ext v28.8b, v5.8b , v6.8b , #3
      168: ext v28.8b, v5.8b , v6.8b , #1
      176: ext v28.8b, v5.8b , v6.8b , #4
      185: ld1 {v5.8b, v6.8b, v7.8b}, [x0], x2 //// Load row3
      200: ext v28.8b, v5.8b , v6.8b , #5
      208: uaddl v14.8h, v28.8b, v5.8b //// a0 + a5 (column1,row3)
      [all …]
/external/boringssl/linux-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
      67: ext v5.16b,v0.16b,v3.16b,#12
      72: eor v3.16b,v3.16b,v5.16b
      73: ext v5.16b,v0.16b,v5.16b,#12
      74: eor v3.16b,v3.16b,v5.16b
      75: ext v5.16b,v0.16b,v5.16b,#12
      77: eor v3.16b,v3.16b,v5.16b
      85: ext v5.16b,v0.16b,v3.16b,#12
      89: eor v3.16b,v3.16b,v5.16b
      90: ext v5.16b,v0.16b,v5.16b,#12
      91: eor v3.16b,v3.16b,v5.16b
      [all …]
/external/boringssl/ios-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
      66: ext v5.16b,v0.16b,v3.16b,#12
      71: eor v3.16b,v3.16b,v5.16b
      72: ext v5.16b,v0.16b,v5.16b,#12
      73: eor v3.16b,v3.16b,v5.16b
      74: ext v5.16b,v0.16b,v5.16b,#12
      76: eor v3.16b,v3.16b,v5.16b
      84: ext v5.16b,v0.16b,v3.16b,#12
      88: eor v3.16b,v3.16b,v5.16b
      89: ext v5.16b,v0.16b,v5.16b,#12
      90: eor v3.16b,v3.16b,v5.16b
      [all …]
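The two boringssl copies above hit the same ext/eor ladder inside the AES key-expansion loop. Assuming v0 holds zero at that point (true in the upstream aesv8-armx source, but not visible in these hits), each "ext …, #12" shifts the 128-bit key block left by one 32-bit word and the following eor folds it back in, so after three rounds every word has been XORed with all preceding words of the block. A scalar C++ sketch of that effect, under that assumption:

```cpp
#include <cstdint>

// Hypothetical scalar model of the ext/eor ladder: with a zero vector as the
// shift-in source, "ext ..., #12" acts as a 32-bit left shift of the 128-bit
// block, and the eor chain leaves word i holding the XOR of the original
// words 0..i (the prefix-XOR step of the AES key schedule).
void prefix_xor_words(std::uint32_t w[4]) {
  w[1] ^= w[0];
  w[2] ^= w[1];  // now w[2] == original w2 ^ w1 ^ w0
  w[3] ^= w[2];  // now w[3] == original w3 ^ w2 ^ w1 ^ w0
}
```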
/external/libgav1/libgav1/src/utils/
  bit_mask_set.h
      44: constexpr BitMaskSet(int v1, int v2, int v3, int v4, int v5) in BitMaskSet() argument
      45: : mask_((1U << v1) | (1U << v2) | (1U << v3) | (1U << v4) | (1U << v5)) {} in BitMaskSet()
      47: constexpr BitMaskSet(int v1, int v2, int v3, int v4, int v5, int v6) in BitMaskSet() argument
      48: : mask_((1U << v1) | (1U << v2) | (1U << v3) | (1U << v4) | (1U << v5) | in BitMaskSet()
      51: constexpr BitMaskSet(int v1, int v2, int v3, int v4, int v5, int v6, int v7) in BitMaskSet() argument
      52: : mask_((1U << v1) | (1U << v2) | (1U << v3) | (1U << v4) | (1U << v5) | in BitMaskSet()
      55: constexpr BitMaskSet(int v1, int v2, int v3, int v4, int v5, int v6, int v7, in BitMaskSet() argument
      57: : mask_((1U << v1) | (1U << v2) | (1U << v3) | (1U << v4) | (1U << v5) | in BitMaskSet()
      60: constexpr BitMaskSet(int v1, int v2, int v3, int v4, int v5, int v6, int v7, in BitMaskSet() argument
      62: : mask_((1U << v1) | (1U << v2) | (1U << v3) | (1U << v4) | (1U << v5) | in BitMaskSet()
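The bit_mask_set.h hits are the 5- to 9-argument constexpr constructors, each folding its arguments into a single word with (1U << v). A stand-alone sketch of the same pattern is below; the class name and the Contains-style query are illustrative additions and are not among the lines matched above:

```cpp
#include <cstdint>

// Stand-alone sketch of the constructor pattern shown in the hits: each small
// integer argument selects one bit of a 32-bit mask.
class BitMaskSetSketch {
 public:
  constexpr BitMaskSetSketch(int v1, int v2, int v3, int v4, int v5)
      : mask_((1U << v1) | (1U << v2) | (1U << v3) | (1U << v4) | (1U << v5)) {}

  // Membership test becomes one shift and one AND instead of a comparison chain.
  constexpr bool Contains(int v) const { return ((mask_ >> v) & 1) != 0; }

 private:
  std::uint32_t mask_;
};

static_assert(BitMaskSetSketch(1, 3, 5, 7, 9).Contains(5), "5 is in the set");
static_assert(!BitMaskSetSketch(1, 3, 5, 7, 9).Contains(4), "4 is not in the set");
```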
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/
  mul64-sext.ll
      11: %v5 = shl i64 %a1, 32
      12: %v6 = ashr exact i64 %v5, 32
      25: %v5 = shl i64 %a1, 32
      26: %v6 = ashr exact i64 %v5, 32
      39: %v5 = shl i64 %a1, 48
      40: %v6 = ashr exact i64 %v5, 48
      53: %v5 = ashr i32 %v4, 16
      54: %v6 = sext i32 %v5 to i64
      66: %v5 = ashr exact i64 %v4, 32
      69: %v8 = mul nsw i64 %v7, %v5
      [all …]
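The Hexagon test's hits are the shl/ashr-exact pairs LLVM uses to represent "take the low 32 (or 16) bits of an i64 and sign-extend them" before a 64-bit multiply. In C++ terms the 32-bit variant boils down to something like the following sketch (my reconstruction of the pattern, not the .ll file's own wording):

```cpp
#include <cstdint>

// "%v5 = shl i64 %a1, 32" followed by "%v6 = ashr exact i64 %v5, 32" is the IR
// idiom for sign-extending the low 32 bits of a 64-bit value; multiplying two
// such results is a widening 32x32 -> 64-bit signed multiply.
std::int64_t mul_sext32(std::int64_t a0, std::int64_t a1) {
  const std::int64_t lo0 = static_cast<std::int32_t>(a0);  // sext of low 32 bits
  const std::int64_t lo1 = static_cast<std::int32_t>(a1);
  return lo0 * lo1;
}
```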
/external/libvpx/libvpx/third_party/googletest/src/include/gtest/
  gtest_pred_impl.h
      307: const T5& v5) { in AssertPred5Helper() argument
      308: if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); in AssertPred5Helper()
      320: << "\n" << e5 << " evaluates to " << v5; in AssertPred5Helper()
      325: #define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\ argument
      326: GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
      331: #define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ argument
      337: #v5, \
      343: v5), on_failure)
      346: #define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ argument
      347: GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
      [all …]
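These hits are the 5-argument layer of GoogleTest's predicate-assertion plumbing: AssertPred5Helper evaluates the predicate and builds the failure message, while the GTEST_PRED5_/GTEST_PRED_FORMAT5_ macros feed it both the argument expressions (#v1 … #v5) and their values. From user code this machinery is normally reached through EXPECT_PRED5/ASSERT_PRED5; a small usage sketch (the predicate and values here are made up for illustration):

```cpp
#include <gtest/gtest.h>

// Hypothetical 5-ary predicate: true when the five values are strictly increasing.
bool StrictlyIncreasing(int v1, int v2, int v3, int v4, int v5) {
  return v1 < v2 && v2 < v3 && v3 < v4 && v4 < v5;
}

TEST(PredFormat5Sketch, ReportsEachArgumentOnFailure) {
  // On failure, the helper shown above prints every argument expression
  // together with the value it evaluated to.
  EXPECT_PRED5(StrictlyIncreasing, 1, 2, 3, 4, 5);
}
```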