/external/llvm/test/MC/AArch64/ |
D | neon-simd-misc.s |
    11: rev64 v6.4s, v8.4s
    117: suqadd v6.4s, v8.4s
    118: suqadd v6.2d, v8.2d
    137: usqadd v6.4s, v8.4s
    138: usqadd v6.2d, v8.2d
    157: sqabs v6.4s, v8.4s
    158: sqabs v6.2d, v8.2d
    177: sqneg v6.4s, v8.4s
    178: sqneg v6.2d, v8.2d
    197: abs v6.4s, v8.4s
    [all …]
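These are assembler round-trip tests for the AArch64 NEON integer ops matched above (rev64, suqadd/usqadd, sqabs, sqneg, abs; the suqadd/usqadd pair saturate-add across signedness). As a rough illustration of what the matched instructions compute, here is a small C sketch using ACLE intrinsics from arm_neon.h; the function and variable names are made up for the example and this is not code from the test itself.

    #include <stdint.h>
    #include <arm_neon.h>

    /* Illustrative only: each intrinsic maps to one of the matched
     * instructions on a 4x32-bit vector (v6 <- op(v8) in the tests). */
    void neon_misc_demo(const int32_t *in, int32_t *out, uint32_t *rev_out)
    {
        int32x4_t v8 = vld1q_s32(in);

        int32x4_t abs_v   = vabsq_s32(v8);    /* abs   v6.4s, v8.4s               */
        int32x4_t sqabs_v = vqabsq_s32(v8);   /* sqabs v6.4s, v8.4s (saturating)  */
        int32x4_t sqneg_v = vqnegq_s32(v8);   /* sqneg v6.4s, v8.4s (saturating)  */

        /* rev64 reverses the 32-bit elements within each 64-bit half. */
        uint32x4_t r = vrev64q_u32(vreinterpretq_u32_s32(v8));

        vst1q_s32(out, vaddq_s32(abs_v, vaddq_s32(sqabs_v, sqneg_v)));
        vst1q_u32(rev_out, r);
    }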
|
D | arm64-simd-ldst.s |
    11: ld1.8b {v4, v5, v6}, [x3]
    102: ; CHECK: ld1.8b { v4, v5, v6 }, [x3] ; encoding: [0x64,0x60,0x40,0x0c]
    223: ld3.8b {v4, v5, v6}, [x19]
    224: ld3.16b {v4, v5, v6}, [x19]
    225: ld3.4h {v4, v5, v6}, [x19]
    226: ld3.8h {v4, v5, v6}, [x19]
    227: ld3.2s {v4, v5, v6}, [x19]
    228: ld3.4s {v4, v5, v6}, [x19]
    229: ld3.2d {v4, v5, v6}, [x19]
    236: ld3.4s {v4, v5, v6}, [x29]
    [all …]
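ld3 is a de-interleaving structure load into three consecutive registers (here v4, v5, v6), while ld1 loads the registers back to back without de-interleaving. A hedged C sketch of the same effect for the .8b case, using the ACLE vld3_u8 intrinsic; the RGB naming is purely illustrative.

    #include <stdint.h>
    #include <arm_neon.h>

    /* Load 24 interleaved bytes (r,g,b,r,g,b,...) and split them into
     * three 8-byte vectors -- the same effect as
     *     ld3.8b { v4, v5, v6 }, [x19]
     * in the test above. Function name is made up for the example. */
    void deinterleave_rgb8(const uint8_t *src,
                           uint8_t *r, uint8_t *g, uint8_t *b)
    {
        uint8x8x3_t pix = vld3_u8(src);   /* pix.val[0..2] correspond to v4..v6 */
        vst1_u8(r, pix.val[0]);
        vst1_u8(g, pix.val[1]);
        vst1_u8(b, pix.val[2]);
    }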
|
D | fullfp16-neon-neg.s |
    182: fminp v3.8h, v5.8h, v6.8h
    190: fminnmp v3.8h, v5.8h, v6.8h
    296: fabs v6.8h, v8.8h
    300: fneg v6.8h, v8.8h
    304: frintn v6.8h, v8.8h
    308: frinta v6.8h, v8.8h
    312: frintp v6.8h, v8.8h
    316: frintm v6.8h, v8.8h
    320: frintx v6.8h, v8.8h
    324: frintz v6.8h, v8.8h
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/ |
D | neon-simd-misc.s |
    11: rev64 v6.4s, v8.4s
    117: suqadd v6.4s, v8.4s
    118: suqadd v6.2d, v8.2d
    137: usqadd v6.4s, v8.4s
    138: usqadd v6.2d, v8.2d
    157: sqabs v6.4s, v8.4s
    158: sqabs v6.2d, v8.2d
    177: sqneg v6.4s, v8.4s
    178: sqneg v6.2d, v8.2d
    197: abs v6.4s, v8.4s
    [all …]
|
D | arm64-simd-ldst.s |
    11: ld1.8b {v4, v5, v6}, [x3]
    102: ; CHECK: ld1.8b { v4, v5, v6 }, [x3] ; encoding: [0x64,0x60,0x40,0x0c]
    223: ld3.8b {v4, v5, v6}, [x19]
    224: ld3.16b {v4, v5, v6}, [x19]
    225: ld3.4h {v4, v5, v6}, [x19]
    226: ld3.8h {v4, v5, v6}, [x19]
    227: ld3.2s {v4, v5, v6}, [x19]
    228: ld3.4s {v4, v5, v6}, [x19]
    229: ld3.2d {v4, v5, v6}, [x19]
    236: ld3.4s {v4, v5, v6}, [x29]
    [all …]
|
D | fullfp16-neon-neg.s |
    182: fminp v3.8h, v5.8h, v6.8h
    190: fminnmp v3.8h, v5.8h, v6.8h
    296: fabs v6.8h, v8.8h
    300: fneg v6.8h, v8.8h
    304: frintn v6.8h, v8.8h
    308: frinta v6.8h, v8.8h
    312: frintp v6.8h, v8.8h
    316: frintm v6.8h, v8.8h
    320: frintx v6.8h, v8.8h
    324: frintz v6.8h, v8.8h
    [all …]
|
/external/capstone/suite/MC/AArch64/ |
D | neon-simd-misc.s.cs |
    4: 0x06,0x09,0xa0,0x4e = rev64 v6.4s, v8.4s
    40: 0x06,0x39,0xa0,0x4e = suqadd v6.4s, v8.4s
    41: 0x06,0x39,0xe0,0x4e = suqadd v6.2d, v8.2d
    47: 0x06,0x39,0xa0,0x6e = usqadd v6.4s, v8.4s
    48: 0x06,0x39,0xe0,0x6e = usqadd v6.2d, v8.2d
    54: 0x06,0x79,0xa0,0x4e = sqabs v6.4s, v8.4s
    55: 0x06,0x79,0xe0,0x4e = sqabs v6.2d, v8.2d
    61: 0x06,0x79,0xa0,0x6e = sqneg v6.4s, v8.4s
    62: 0x06,0x79,0xe0,0x6e = sqneg v6.2d, v8.2d
    68: 0x06,0xb9,0xa0,0x4e = abs v6.4s, v8.4s
    [all …]
|
/external/libavc/common/armv8/ |
D | ih264_weighted_pred_av8.s |
    141: ld1 {v6.s}[0], [x0], x2 //load row 3 in source
    142: ld1 {v6.s}[1], [x0], x2 //load row 4 in source
    145: uxtl v6.8h, v6.8b //converting rows 3,4 to 16-bit
    148: mul v6.8h, v6.8h , v2.h[0] //weight mult. for rows 3,4
    152: srshl v6.8h, v6.8h , v0.8h //rounds off the weighted samples from rows 3,4
    155: saddw v6.8h, v6.8h , v3.8b //adding offset for rows 3,4
    158: sqxtun v6.8b, v6.8h //saturating rows 3,4 to unsigned 8-bit
    162: st1 {v6.s}[0], [x1], x3 //store row 3 in destination
    163: st1 {v6.s}[1], [x1], x3 //store row 4 in destination
    172: ld1 {v6.8b}, [x0], x2 //load row 2 in source
    [all …]
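The matched lines implement H.264 explicit weighted prediction: widen the samples to 16 bits, multiply by the weight, apply a rounding shift by log2 of the weight denominator, add the offset, and saturate back to unsigned 8 bits. A minimal scalar C sketch of that per-sample computation; the parameter names are assumptions for the example, not the library's API.

    #include <stdint.h>

    static inline uint8_t clip_u8(int32_t x)
    {
        return (uint8_t)(x < 0 ? 0 : (x > 255 ? 255 : x));
    }

    /* One sample of explicit weighted prediction, mirroring the
     * uxtl / mul / srshl / saddw / sqxtun sequence in the listing. */
    static uint8_t weighted_sample(uint8_t src, int16_t wt,
                                   int16_t ofst, int log_wd)
    {
        int32_t v = (int32_t)src * wt;                   /* mul            */
        if (log_wd > 0)
            v = (v + (1 << (log_wd - 1))) >> log_wd;     /* rounding shift */
        return clip_u8(v + ofst);                        /* offset + clamp */
    }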
|
D | ih264_inter_pred_filters_luma_horz_av8.s |
    133: ld1 {v5.8b, v6.8b, v7.8b}, [x0], x2 //// Load row1
    136: ext v28.8b, v5.8b , v6.8b, #5 ////extract a[5] (column1,row1)
    138: ext v27.8b, v6.8b , v7.8b, #5 ////extract a[5] (column2,row1)
    141: uaddl v16.8h, v27.8b, v6.8b //// a0 + a5 (column2,row1)
    144: ext v28.8b, v5.8b , v6.8b, #2 ////extract a[2] (column1,row1)
    146: ext v27.8b, v6.8b , v7.8b, #2 ////extract a[2] (column2,row1)
    152: ext v28.8b, v5.8b , v6.8b, #3 ////extract a[3] (column1,row1)
    154: ext v27.8b, v6.8b , v7.8b, #3 ////extract a[3] (column2,row1)
    160: ext v28.8b, v5.8b , v6.8b, #1 ////extract a[1] (column1,row1)
    162: ext v27.8b, v6.8b , v7.8b, #1 ////extract a[1] (column2,row1)
    [all …]
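The ext instructions gather the six horizontal neighbours a[0]..a[5] needed by H.264's half-pel luma interpolation filter (1, -5, 20, 20, -5, 1) / 32, and uaddl starts the accumulation with the a0 + a5 pair. A scalar sketch of one output sample under that reading; the assembly of course does 8 or 16 samples per iteration.

    #include <stdint.h>

    /* Half-pel horizontal interpolation for one luma sample; p points at a[0]. */
    static uint8_t h264_halfpel_h(const uint8_t *p)
    {
        int32_t acc = (p[0] + p[5])            /* a0 + a5        */
                    - 5 * (p[1] + p[4])        /* -5 * (a1 + a4) */
                    + 20 * (p[2] + p[3]);      /* 20 * (a2 + a3) */
        acc = (acc + 16) >> 5;                 /* round, divide by 32 */
        return (uint8_t)(acc < 0 ? 0 : (acc > 255 ? 255 : acc));
    }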
|
D | ih264_inter_pred_luma_horz_qpel_av8.s |
    141: ld1 {v5.8b, v6.8b, v7.8b}, [x0], x2 //// Load row1
    144: ext v28.8b, v5.8b , v6.8b , #5
    146: ext v27.8b, v6.8b , v7.8b , #5
    149: uaddl v16.8h, v27.8b, v6.8b //// a0 + a5 (column2,row1)
    152: ext v28.8b, v5.8b , v6.8b , #2
    154: ext v27.8b, v6.8b , v7.8b , #2
    160: ext v28.8b, v5.8b , v6.8b , #3
    162: ext v27.8b, v6.8b , v7.8b , #3
    168: ext v28.8b, v5.8b , v6.8b , #1
    170: ext v27.8b, v6.8b , v7.8b , #1
    [all …]
|
D | ih264_padding_neon_av8.s |
    198: dup v6.16b, w11
    202: st1 {v6.16b}, [x4], x1 // 16 bytes store
    214: dup v6.16b, w11
    217: st1 {v6.16b}, [x4], x1 // 16 bytes store
    238: dup v6.16b, w11
    242: st1 {v6.16b}, [x4], #16 // 16 bytes store
    246: st1 {v6.16b}, [x4], x6 // 16 bytes store
    258: dup v6.16b, w11
    261: st1 {v6.16b}, [x4], #16 // 16 bytes store
    262: st1 {v6.16b}, [x4], x6 // 16 bytes store
    [all …]
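Here dup v6.16b, w11 splats an edge pixel into a vector, which the st1 stores then write repeatedly to fill the horizontal padding region of the frame. The scalar equivalent is just a per-row memset with the edge byte; a short sketch, with the argument names chosen for the example rather than taken from the library.

    #include <stdint.h>
    #include <string.h>

    /* Pad 'pad_size' bytes to the left of each row by replicating the
     * first pixel of that row -- the scalar view of dup + repeated st1. */
    static void pad_left(uint8_t *src, int stride, int ht, int pad_size)
    {
        for (int row = 0; row < ht; row++, src += stride)
            memset(src - pad_size, src[0], pad_size);
    }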
|
D | ih264_intra_pred_luma_16x16_av8.s |
    440: ld1 {v6.2s, v7.2s}, [x7]
    442: uxtl v16.8h, v6.8b
    500: dup v6.8h, w12
    502: shl v26.8h, v6.8h, #3
    505: add v28.8h, v30.8h , v6.8h
    510: add v26.8h, v26.8h , v6.8h
    511: add v28.8h, v28.8h , v6.8h
    515: add v26.8h, v26.8h , v6.8h
    516: add v28.8h, v28.8h , v6.8h
    520: add v26.8h, v26.8h , v6.8h
    [all …]
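The shl #3 and the run of repeated adds of v6 are consistent with the Intra_16x16 plane mode, which walks the block by repeatedly adding a gradient term to a running accumulator. A scalar sketch of the standard plane formula for reference; that this particular stretch of the file is the plane-mode path, and that a, b, c are precomputed as in the spec, are assumptions of the sketch.

    #include <stdint.h>

    /* H.264 Intra_16x16 plane mode, scalar form:
     *   pred[y][x] = Clip1((a + b*(x-7) + c*(y-7) + 16) >> 5) */
    static void intra16_plane(uint8_t pred[16][16], int a, int b, int c)
    {
        for (int y = 0; y < 16; y++)
            for (int x = 0; x < 16; x++) {
                int v = (a + b * (x - 7) + c * (y - 7) + 16) >> 5;
                pred[y][x] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
            }
    }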
|
D | ih264_deblk_chroma_av8.s |
    94: ld2 {v6.8b, v7.8b}, [x0], x1 //D6 = p1u , D7 = p1v
    101: uaddl v8.8h, v6.8b, v0.8b //
    108: mov v6.d[1], v7.d[0]
    109: uabd v26.16b, v6.16b , v4.16b //Q13 = ABS(p1 - p0)
    119: umlal v14.8h, v6.8b, v31.8b //
    203: ld4 {v4.h, v5.h, v6.h, v7.h}[0], [x0], x1
    204: ld4 {v4.h, v5.h, v6.h, v7.h}[1], [x0], x1
    205: ld4 {v4.h, v5.h, v6.h, v7.h}[2], [x0], x1
    206: ld4 {v4.h, v5.h, v6.h, v7.h}[3], [x0], x1
    212: mov v10.16b, v6.16b
    [all …]
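uabd computes |p1 - p0| for the filter-decision thresholds, and the umlal with a constant vector fits the 2*p1 term of the strong chroma deblocking filter. A scalar sketch of that filter as the standard defines it; whether this routine is the bS = 4 path or the tC-clipped path is an assumption here, and the pointer-based interface is illustrative only.

    #include <stdint.h>
    #include <stdlib.h>

    /* Strong (bS == 4) chroma deblocking across one edge sample pair:
     *   p0' = (2*p1 + p0 + q1 + 2) >> 2
     *   q0' = (2*q1 + q0 + p1 + 2) >> 2
     * applied only when |p0-q0| < alpha, |p1-p0| < beta, |q1-q0| < beta. */
    static void chroma_deblk_bs4(uint8_t *p1, uint8_t *p0,
                                 uint8_t *q0, uint8_t *q1,
                                 int alpha, int beta)
    {
        if (abs(*p0 - *q0) < alpha && abs(*p1 - *p0) < beta &&
            abs(*q1 - *q0) < beta) {
            uint8_t np0 = (uint8_t)((2 * *p1 + *p0 + *q1 + 2) >> 2);
            uint8_t nq0 = (uint8_t)((2 * *q1 + *q0 + *p1 + 2) >> 2);
            *p0 = np0;
            *q0 = nq0;
        }
    }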
|
/external/llvm/test/MC/AMDGPU/ |
D | vop3.s |
    13: v_cmp_lt_f32_e64 s[2:3], v4, -v6
    19: v_cmp_lt_f32_e64 vcc, v4, v6
    27: v_cmp_lt_f32 s[2:3] -v4, v6
    31: v_cmp_lt_f32 s[2:3] v4, -v6
    35: v_cmp_lt_f32 s[2:3] -v4, -v6
    39: v_cmp_lt_f32 s[2:3] |v4|, v6
    43: v_cmp_lt_f32 s[2:3] v4, |v6|
    47: v_cmp_lt_f32 s[2:3] |v4|, |v6|
    51: v_cmp_lt_f32 s[2:3] -|v4|, v6
    55: v_cmp_lt_f32 s[2:3] -abs(v4), v6
    [all …]
|
D | ds.s |
    25: ds_write2_b32 v2, v4, v6 offset0:4
    29: ds_write2_b32 v2, v4, v6 offset0:4 offset1:8
    33: ds_write2_b32 v2, v4, v6 offset1:8
    101: ds_mskor_b32 v2, v4, v6
    109: ds_write2_b32 v2, v4, v6
    113: ds_write2st64_b32 v2, v4, v6
    117: ds_cmpst_b32 v2, v4, v6
    121: ds_cmpst_f32 v2, v4, v6
    125: ds_min_f32 v2, v4, v6
    129: ds_max_f32 v2, v4, v6
    [all …]
|
/external/parameter-framework/asio-1.10.6/include/asio/ip/detail/impl/ |
D | endpoint.ipp |
    49: data_.v6.sin6_family = ASIO_OS_DEF(AF_INET6);
    50: data_.v6.sin6_port =
    52: data_.v6.sin6_flowinfo = 0;
    53: data_.v6.sin6_addr.s6_addr[0] = 0; data_.v6.sin6_addr.s6_addr[1] = 0;
    54: data_.v6.sin6_addr.s6_addr[2] = 0, data_.v6.sin6_addr.s6_addr[3] = 0;
    55: data_.v6.sin6_addr.s6_addr[4] = 0, data_.v6.sin6_addr.s6_addr[5] = 0;
    56: data_.v6.sin6_addr.s6_addr[6] = 0, data_.v6.sin6_addr.s6_addr[7] = 0;
    57: data_.v6.sin6_addr.s6_addr[8] = 0, data_.v6.sin6_addr.s6_addr[9] = 0;
    58: data_.v6.sin6_addr.s6_addr[10] = 0, data_.v6.sin6_addr.s6_addr[11] = 0;
    59: data_.v6.sin6_addr.s6_addr[12] = 0, data_.v6.sin6_addr.s6_addr[13] = 0;
    [all …]
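These lines are asio's IPv6 endpoint setup, filling a sockaddr_in6 field by field and zeroing s6_addr byte by byte (the wildcard address). A minimal standalone C sketch doing the same thing with standard socket APIs; this is not asio's code, just the shape of it, and the function name is made up.

    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    /* Build a wildcard ("any") IPv6 endpoint on the given port,
     * equivalent in effect to the zero-filled s6_addr in the listing. */
    static struct sockaddr_in6 make_any_v6_endpoint(unsigned short port)
    {
        struct sockaddr_in6 sa;
        memset(&sa, 0, sizeof(sa));        /* zeroes s6_addr[0..15] too */
        sa.sin6_family   = AF_INET6;
        sa.sin6_port     = htons(port);
        sa.sin6_flowinfo = 0;
        sa.sin6_scope_id = 0;
        return sa;
    }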
|
/external/libhevc/common/arm64/ |
D | ihevc_padding.s |
    113: dup v6.16b,w11
    141: st1 {v6.16b},[x7],#16 //128/8 = 16 bytes store
    142: st1 {v6.16b},[x7],#16 //128/8 = 16 bytes store
    143: st1 {v6.16b},[x7],#16 //128/8 = 16 bytes store
    144: st1 {v6.16b},[x7],#16 //128/8 = 16 bytes store
    145: st1 {v6.16b},[x7] //128/8 = 16 bytes store
    231: dup v6.8h,w11
    259: st1 {v6.16b},[x7],#16 //128/8 = 16 bytes store
    260: st1 {v6.16b},[x7],#16 //128/8 = 16 bytes store
    261: st1 {v6.16b},[x7],#16 //128/8 = 16 bytes store
    [all …]
|
D | ihevc_intra_pred_luma_planar.s |
    147: dup v6.8b,w9 //nt - 1 - row
    190: umlal v27.8h, v6.8b, v3.8b //(1)(nt-1-row) * src[2nt+1+col]
    198: sub v6.8b, v6.8b , v7.8b //(1)
    206: umlal v30.8h, v6.8b, v3.8b //(2)
    212: sub v6.8b, v6.8b , v7.8b //(2)
    221: umlal v28.8h, v6.8b, v3.8b //(3)
    229: sub v6.8b, v6.8b , v7.8b //(3)
    238: umlal v25.8h, v6.8b, v3.8b //(4)
    246: sub v6.8b, v6.8b , v7.8b //(4)
    255: umlal v16.8h, v6.8b, v3.8b //(5)
    [all …]
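The umlal comment "(nt-1-row) * src[2nt+1+col]" is one term of HEVC planar intra prediction, and the repeated sub v6, v6, v7 steps the (nt-1-row) weight down once per row. A scalar sketch of the full planar formula; the left/top reference-sample layout used here is an assumption of the sketch, not taken from the library.

    #include <stdint.h>

    /* HEVC planar intra prediction for an nt x nt block:
     *   pred[y][x] = ((nt-1-x)*left[y] + (x+1)*top[nt]
     *              + (nt-1-y)*top[x]  + (y+1)*left[nt] + nt) >> (log2nt + 1) */
    static void hevc_intra_planar(uint8_t *pred, int stride, int nt, int log2nt,
                                  const uint8_t *left, const uint8_t *top)
    {
        for (int y = 0; y < nt; y++)
            for (int x = 0; x < nt; x++)
                pred[y * stride + x] = (uint8_t)(
                    ((nt - 1 - x) * left[y] + (x + 1) * top[nt] +
                     (nt - 1 - y) * top[x]  + (y + 1) * left[nt] + nt)
                    >> (log2nt + 1));
    }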
|
D | ihevc_itrans_recon_8x8.s |
    190: ld1 {v6.4h},[x0],#8
    192: smull v24.4s, v6.4h, v0.h[1] //// y1 * cos1(part of b0)
    194: smull v26.4s, v6.4h, v0.h[3] //// y1 * cos3(part of b1)
    196: smull v28.4s, v6.4h, v1.h[1] //// y1 * sin3(part of b2)
    198: smull v30.4s, v6.4h, v1.h[3] //// y1 * sin1(part of b3)
    210: smull v6.4s, v3.4h, v0.h[2] //// y2 * cos2(part of d0)
    240: smlal v6.4s, v11.4h, v1.h[2] //// d0 = y2 * cos2 + y6 * sin2(part of a0 and a1)
    250: add v14.4s, v10.4s , v6.4s //// a0 = c0 + d0(part of x0,x7)
    251: sub v10.4s, v10.4s , v6.4s //// a3 = c0 - d0(part of x3,x4)
    256: sub v6.4s, v14.4s , v24.4s //// a0 - b0(part of x7)
    [all …]
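The comments in the listing spell out the even/odd butterfly of the 8x8 inverse transform: products such as y1*cos1 form the odd terms b0..b3, y2*cos2 + y6*sin2 forms d0, and the outputs are recombined as a0 = c0 + d0, x0 = a0 + b0, x7 = a0 - b0. A compact scalar sketch of that structure for one column; the concrete coefficient values shown are the usual HEVC 8-point constants and are an assumption about this particular file, which loads its coefficients from a table.

    #include <stdint.h>

    /* One column of an 8-point inverse DCT in the even/odd form used by the
     * listing (c/d = even part, b = odd part); requires shift >= 1. */
    static void idct8_col(const int16_t y[8], int32_t x[8], int shift)
    {
        int32_t c0 = 64 * (y[0] + y[4]), c1 = 64 * (y[0] - y[4]);
        int32_t d0 = 83 * y[2] + 36 * y[6];
        int32_t d1 = 36 * y[2] - 83 * y[6];
        int32_t a0 = c0 + d0, a3 = c0 - d0;              /* "a0 = c0 + d0" */
        int32_t a1 = c1 + d1, a2 = c1 - d1;
        int32_t b0 = 89 * y[1] + 75 * y[3] + 50 * y[5] + 18 * y[7];
        int32_t b1 = 75 * y[1] - 18 * y[3] - 89 * y[5] - 50 * y[7];
        int32_t b2 = 50 * y[1] - 89 * y[3] + 18 * y[5] + 75 * y[7];
        int32_t b3 = 18 * y[1] - 50 * y[3] + 75 * y[5] - 89 * y[7];
        int32_t rnd = 1 << (shift - 1);
        x[0] = (a0 + b0 + rnd) >> shift;  x[7] = (a0 - b0 + rnd) >> shift;
        x[1] = (a1 + b1 + rnd) >> shift;  x[6] = (a1 - b1 + rnd) >> shift;
        x[2] = (a2 + b2 + rnd) >> shift;  x[5] = (a2 - b2 + rnd) >> shift;
        x[3] = (a3 + b3 + rnd) >> shift;  x[4] = (a3 - b3 + rnd) >> shift;
    }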
|
/external/libnetfilter_conntrack/src/conntrack/ |
D | objopt.c |
    30: memcpy(&this->src.v6, &other->dst.v6, sizeof(union __nfct_address)); in __autocomplete()
    31: memcpy(&this->dst.v6, &other->src.v6, sizeof(union __nfct_address)); in __autocomplete()
    63: memcpy(&ct->snat.min_ip.v6, &ct->repl.dst.v6, in setobjopt_undo_snat()
    65: memcpy(&ct->snat.max_ip.v6, &ct->snat.min_ip.v6, in setobjopt_undo_snat()
    67: memcpy(&ct->repl.dst.v6, &ct->head.orig.src.v6, in setobjopt_undo_snat()
    85: memcpy(&ct->dnat.min_ip.v6, &ct->repl.src.v6, in setobjopt_undo_dnat()
    87: memcpy(&ct->dnat.max_ip.v6, &ct->dnat.min_ip.v6, in setobjopt_undo_dnat()
    89: memcpy(&ct->repl.src.v6, &ct->head.orig.dst.v6, in setobjopt_undo_dnat()
    156: if (memcmp(&ct->repl.dst.v6, &ct->head.orig.src.v6, in getobjopt_is_snat()
    178: if (memcmp(&ct->repl.src.v6, &ct->head.orig.dst.v6, in getobjopt_is_dnat()
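The matched memcpy calls copy whole address unions to reverse a conntrack tuple: __autocomplete() swaps src and dst, and the NAT-undo helpers restore the reply tuple from the original one (the memcmp calls detect whether NAT was applied at all). A reduced C sketch of the swap pattern with a stand-in address union; the struct layout here is illustrative, not libnetfilter_conntrack's real one.

    #include <stdint.h>
    #include <string.h>
    #include <netinet/in.h>

    /* Stand-in for union __nfct_address: enough to show the copy pattern. */
    union addr {
        uint32_t        v4;
        struct in6_addr v6;
    };

    struct tuple { union addr src, dst; };

    /* Fill 'reverse' with 'fwd' seen from the other direction, using the
     * same memcpy-of-the-v6-union idiom as __autocomplete(). */
    static void reverse_tuple(struct tuple *reverse, const struct tuple *fwd)
    {
        memcpy(&reverse->src.v6, &fwd->dst.v6, sizeof(union addr));
        memcpy(&reverse->dst.v6, &fwd->src.v6, sizeof(union addr));
    }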
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | aggr-licm.ll |
    26: %v6 = phi i32 [ 0, %b0 ], [ %v198, %b1 ]
    28: %v8 = zext i32 %v6 to i64
    31: %v11 = add nuw nsw i32 %v6, 32
    38: %v18 = add nuw nsw i32 %v6, 1
    43: %v23 = add nsw i32 %v6, 33
    50: %v30 = add nsw i32 %v6, 2
    55: %v35 = add nsw i32 %v6, 34
    62: %v42 = add nsw i32 %v6, 3
    67: %v47 = add nsw i32 %v6, 35
    74: %v54 = add nsw i32 %v6, 4
    [all …]
|
D | mul64-sext.ll |
    12: %v6 = ashr exact i64 %v5, 32
    13: %v7 = mul nsw i64 %v6, %v4
    26: %v6 = ashr exact i64 %v5, 32
    27: %v7 = mul nsw i64 %v6, %v4
    40: %v6 = ashr exact i64 %v5, 48
    41: %v7 = mul nsw i64 %v6, %v4
    54: %v6 = sext i32 %v5 to i64
    55: %v7 = mul nsw i64 %v3, %v6
    67: %v6 = shl i64 %a1, 32
    68: %v7 = ashr exact i64 %v6, 32
    [all …]
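The test patterns all compute a widening multiply: shl by 32 followed by ashr exact by 32 is the IR idiom for sign-extending the low 32 bits of an i64, and the result feeds a 32x32 -> 64 signed multiply (the shift-by-48 variant extends only 16 bits). A C equivalent that such IR typically originates from, as a point of reference:

    #include <stdint.h>

    /* 32 x 32 -> 64 signed multiply; "shl 32 then ashr exact 32" in the
     * IR is the same as the (int32_t) truncation + sign extension here. */
    static int64_t mul32x32_to_64(int64_t a, int64_t b)
    {
        return (int64_t)(int32_t)a * (int64_t)(int32_t)b;
    }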
|
/external/libxaac/decoder/armv8/ |
D | ixheaacd_inv_dit_fft_8pt.s |
    42: LD1 {v6.s}[0], [x6], x5
    44: LD1 {v6.s}[1], [x6], x5
    60: SQADD v10.2s, v2.2s, v6.2s //a20_v = vqadd_s32(y1_3,y9_11);
    65: SQSUB v5.2s, v2.2s, v6.2s //a3_v = vqsub_s32(y1_3,y9_11);
    67: SQSUB v6.2s, v4.2s, v8.2s //a1_v = vqsub_s32(y5_7,y13_15);
    75: SQADD v9.2s, v1.2s, v6.2s //x6_14 = vqadd_s32(a0_v,a1_v);
    77: SQSUB v10.2s, v1.2s, v6.2s //x2_10 = vqsub_s32(a0_v,a1_v);
    83: UZP1 v6.2s, v4.2s, v8.2s //x4_5 = vuzp1_s32(x4_12,x5_13);
    90: SQADD v12.2s, v6.2s, v7.2s //real_imag4 = vqadd_s32(x4_5,x13_12);
    91: SQSUB v14.2s, v6.2s, v7.2s //a0_1_v = vqsub_s32(x4_5,x13_12);
    [all …]
|
D | ixheaacd_cos_sin_mod_loop2.s |
    87: DUP v6.2s, w9
    88: SQSUB v4.2s, v6.2s, v4.2s
    105: sMULL v6.2d, v0.2s, v3.2s //add 2nd
    106: sshr v6.2d, v6.2d, #16
    112: add v12.2d, v8.2d , v6.2d
    145: sMULL v6.2d, v0.2s, v3.2s //add 2nd
    146: sshr v6.2d, v6.2d, #16
    152: add v12.2d, v8.2d , v6.2d
    184: sMULL v6.2d, v0.2s, v3.2s //add 2nd
    185: sshr v6.2d, v6.2d, #16
    [all …]
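smull followed by sshr #16 and an add is a fixed-point complex rotation: multiply 32-bit samples by fixed-point twiddle factors, drop 16 fractional bits, then combine the partial products. A scalar sketch of one such rotation; the Q16 format and the variable names are assumptions for the example.

    #include <stdint.h>

    /* Rotate a complex sample (re, im) by a twiddle (cos_t, sin_t) given in
     * Q16 fixed point, mirroring the smull / sshr #16 / add pattern above. */
    static void rotate_q16(int32_t re, int32_t im,
                           int32_t cos_t, int32_t sin_t,
                           int32_t *out_re, int32_t *out_im)
    {
        int64_t rr = ((int64_t)re * cos_t) >> 16;   /* smull + sshr #16 */
        int64_t ii = ((int64_t)im * sin_t) >> 16;
        int64_t ri = ((int64_t)re * sin_t) >> 16;
        int64_t ir = ((int64_t)im * cos_t) >> 16;
        *out_re = (int32_t)(rr - ii);               /* combine partials */
        *out_im = (int32_t)(ri + ir);
    }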
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AMDGPU/ |
D | vop3.s |
    16: v_cmp_lt_f32_e64 s[2:3], v4, -v6
    22: v_cmp_lt_f32_e64 vcc, v4, v6
    30: v_cmp_lt_f32 s[2:3] -v4, v6
    34: v_cmp_lt_f32 s[2:3] v4, -v6
    38: v_cmp_lt_f32 s[2:3] -v4, -v6
    42: v_cmp_lt_f32 s[2:3] |v4|, v6
    46: v_cmp_lt_f32 s[2:3] v4, |v6|
    50: v_cmp_lt_f32 s[2:3] |v4|, |v6|
    54: v_cmp_lt_f32 s[2:3] -|v4|, v6
    58: v_cmp_lt_f32 s[2:3] -abs(v4), v6
    [all …]
|