/external/valgrind/VEX/priv/ |
D | host_generic_simd128.c |
      214  res->w8[ 0] = max8S(argL->w8[ 0], argR->w8[ 0]); in h_generic_calc_Max8Sx16()
      215  res->w8[ 1] = max8S(argL->w8[ 1], argR->w8[ 1]); in h_generic_calc_Max8Sx16()
      216  res->w8[ 2] = max8S(argL->w8[ 2], argR->w8[ 2]); in h_generic_calc_Max8Sx16()
      217  res->w8[ 3] = max8S(argL->w8[ 3], argR->w8[ 3]); in h_generic_calc_Max8Sx16()
      218  res->w8[ 4] = max8S(argL->w8[ 4], argR->w8[ 4]); in h_generic_calc_Max8Sx16()
      219  res->w8[ 5] = max8S(argL->w8[ 5], argR->w8[ 5]); in h_generic_calc_Max8Sx16()
      220  res->w8[ 6] = max8S(argL->w8[ 6], argR->w8[ 6]); in h_generic_calc_Max8Sx16()
      221  res->w8[ 7] = max8S(argL->w8[ 7], argR->w8[ 7]); in h_generic_calc_Max8Sx16()
      222  res->w8[ 8] = max8S(argL->w8[ 8], argR->w8[ 8]); in h_generic_calc_Max8Sx16()
      223  res->w8[ 9] = max8S(argL->w8[ 9], argR->w8[ 9]); in h_generic_calc_Max8Sx16()
      [all …]
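A minimal C sketch of the pattern these hits unroll, assuming a VEX-style V128 union with a signed-byte view named w8; the types and the max8S helper below are illustrative stand-ins, not VEX's actual headers:

    #include <stdint.h>

    /* Illustrative stand-in for VEX's V128: sixteen signed 8-bit lanes. */
    typedef union {
        int8_t w8[16];
    } V128;

    /* Per-lane signed max, as the max8S() calls in the listing suggest. */
    static inline int8_t max8S(int8_t xx, int8_t yy)
    {
        return xx > yy ? xx : yy;
    }

    /* Loop form of the fully unrolled h_generic_calc_Max8Sx16() above. */
    static void calc_Max8Sx16(V128 *res, const V128 *argL, const V128 *argR)
    {
        for (int i = 0; i < 16; i++)
            res->w8[i] = max8S(argL->w8[i], argR->w8[i]);
    }

The real file spells out all sixteen lanes by hand; the loop above is just the compact equivalent.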
/external/libxaac/decoder/armv8/ |
D | ixheaacd_fft32x32_ld2_armv8.s |
      50   ADD w8, w3, w5   //xh0_1 = x_2 + x_6
      66   ADD w2, w6, w8   //n00 = xh0_0 + xh0_1
      68   SUB w4, w6, w8   //n20 = xh0_0 - xh0_1
      95   ADD w8, w3, w5   //xh0_1 = x_2 + x_6
      111  ADD w2, w6, w8   //n00 = xh0_0 + xh0_1
      113  SUB w4, w6, w8   //n20 = xh0_0 - xh0_1
      140  ADD w8, w3, w5   //xh0_1 = x_2 + x_6
      156  ADD w2, w6, w8   //n00 = xh0_0 + xh0_1
      158  SUB w4, w6, w8   //n20 = xh0_0 - xh0_1
      185  ADD w8, w3, w5   //xh0_1 = x_2 + x_6
      [all …]
D | ixheaacd_postradixcompute4.s |
      47   LDP w7, w8, [x1], #8   // x_2 :x_3
      60   ADD w11, w8, w12       // xh1_1 = x_3 + x_7
      61   SUB w8, w8, w12        // xl1_1 = x_3 - x_7
      69   ADD w11, w5, w8        // n10 = xl0_0 + xl1_1
      70   SUB w5, w5, w8         // n30 = xl0_0 - xl1_1
      72   ADD w8, w6, w7         // n31 = xl1_0 + xl0_1
      87   STR w8, [x0], #0       // y3[h2 + 1] = n31, x7 -> y0[h2+2]
      92   LDP w7, w8, [x4], #8   // x_a :x_b
      108  ADD w11, w8, w12
      109  SUB w8, w8, w12
      [all …]
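Taken together, the comments in these two libxaac files spell out a radix-4 FFT butterfly. Below is a hedged C sketch assuming interleaved re/im 32-bit data; the names follow the assembly comments, and the lines marked "assumed" are the standard radix-4 counterparts of sums and differences the hits do not show:

    #include <stdint.h>

    /* Radix-4 butterfly on four interleaved complex values
       (x[0],x[1]) .. (x[6],x[7]); names mirror the assembly comments. */
    static void radix4_butterfly(const int32_t x[8], int32_t y[8])
    {
        int32_t xh0_0 = x[0] + x[4], xh1_0 = x[1] + x[5];  /* assumed even-leg sums  */
        int32_t xh0_1 = x[2] + x[6];                       /* xh0_1 = x_2 + x_6      */
        int32_t xh1_1 = x[3] + x[7];                       /* xh1_1 = x_3 + x_7      */
        int32_t xl0_0 = x[0] - x[4], xl1_0 = x[1] - x[5];  /* assumed even-leg diffs */
        int32_t xl0_1 = x[2] - x[6];                       /* assumed                */
        int32_t xl1_1 = x[3] - x[7];                       /* xl1_1 = x_3 - x_7      */

        y[0] = xh0_0 + xh0_1;   /* n00 */
        y[1] = xh1_0 + xh1_1;   /* n01, assumed */
        y[2] = xl0_0 + xl1_1;   /* n10 */
        y[3] = xl1_0 - xl0_1;   /* n11, assumed: the -j twiddle on this leg */
        y[4] = xh0_0 - xh0_1;   /* n20 */
        y[5] = xh1_0 - xh1_1;   /* n21, assumed */
        y[6] = xl0_0 - xl1_1;   /* n30 */
        y[7] = xl1_0 + xl0_1;   /* n31, stored as y3[h2 + 1] in the hit at line 87 */
    }

The output placement here is also an assumption; the real kernel scatters y0..y3 through strided output arrays.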
/external/llvm/test/CodeGen/AArch64/ |
D | sdivpow2.ll |
      6   ; CHECK: add w8, w0, #7
      8   ; CHECK: csel w8, w8, w0, lt
      9   ; CHECK: asr w0, w8, #3
      16  ; CHECK: add w8, w0, #7
      18  ; CHECK: csel w8, w8, w0, lt
      19  ; CHECK: neg w0, w8, asr #3
      26  ; CHECK: add w8, w0, #31
      28  ; CHECK: csel w8, w8, w0, lt
      29  ; CHECK: asr w0, w8, #5
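These CHECK lines are the standard round-toward-zero signed division by a power of two: an arithmetic shift alone rounds toward minus infinity, so a negative dividend is first biased by divisor−1 (the csel picks the biased value when the preceding compare set lt), and the n / −8 case folds a negation into the shift (neg w0, w8, asr #3). A minimal C sketch of the n / 8 sequence:

    #include <stdint.h>

    /* What the add/csel/asr sequence computes for n / 8.  The >> here is
       assumed to be an arithmetic shift, as it is on AArch64 compilers. */
    int32_t sdiv_pow2_8(int32_t n)
    {
        int32_t biased = n + 7;             /* add  w8, w0, #7          */
        int32_t t = (n < 0) ? biased : n;   /* cmp; csel w8, w8, w0, lt */
        return t >> 3;                      /* asr  w0, w8, #3          */
    }

For example, sdiv_pow2_8(-1) yields 0, as C requires, where a bare asr would give -1.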
D | fast-isel-folded-shift.ll |
      7    ; CHECK: and [[REG:w[0-9]+]], w0, w8
      16   ; CHECK: and [[REG:w[0-9]+]], w0, w8
      25   ; CHECK: and w0, w0, w8
      42   ; CHECK: orr [[REG:w[0-9]+]], w0, w8
      51   ; CHECK: orr [[REG:w[0-9]+]], w0, w8
      60   ; CHECK: orr w0, w0, w8
      77   ; CHECK: eor [[REG:w[0-9]+]], w0, w8
      86   ; CHECK: eor [[REG:w[0-9]+]], w0, w8
      95   ; CHECK: eor w0, w0, w8
      112  ; CHECK: add w0, w0, w8
D | fast-isel-branch-cond-split.ll |
      43  ; CHECK-NEXT: cset w8, eq
      46  ; CHECK-NEXT: orr w8, w8, w9
      47  ; CHECK-NEXT: tbnz w8, #0,
      65  ; CHECK-NEXT: cset w8, ne
      68  ; CHECK-NEXT: and w8, w8, w9
      69  ; CHECK-NEXT: tbz w8, #0,
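The two CHECK blocks materialize each comparison with cset, combine the two flag bits with orr or and, and branch on bit 0 with tbnz/tbz. A hedged C sketch of source shapes that typically lower this way (the function names are hypothetical, not taken from the test):

    void do_something(void);   /* hypothetical callee */

    /* Lowers to cset eq; cset eq; orr w8, w8, w9; tbnz w8, #0:
       branch into the body if either value is zero. */
    void split_or(long a, long b)
    {
        if (a == 0 || b == 0)
            do_something();
    }

    /* Lowers to cset ne; cset ne; and w8, w8, w9; tbz w8, #0:
       skip the body unless both conditions hold. */
    void split_and(long a, long b)
    {
        if (a != 0 && b != 0)
            do_something();
    }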
/external/llvm/test/MC/AArch64/ |
D | arm64-leaf-compact-unwind.s |
      157  ldr w8, [x8]
      174  sub w8, w8, w9
      175  sub w8, w8, w7, lsl #1
      176  sub w8, w8, w6, lsl #1
      177  sub w8, w8, w5, lsl #1
      178  sub w8, w8, w4, lsl #1
      179  sub w8, w8, w3, lsl #1
      180  sub w8, w8, w2, lsl #1
      181  sub w0, w8, w1, lsl #1
D | arm64-aliases.s |
      21   mov wsp, w8
      22   ; CHECK: mov wsp, w8
      64   cmn w8, w9, asr #3
      73   ; CHECK: cmn w8, w9, asr #3     ; encoding: [0x1f,0x0d,0x89,0x2b]
      86   cmp w8, w9, asr #3
      91   cmp x8, w8, uxtw
      92   cmp w9, w8, uxtw
      99   ; CHECK: cmp w8, w9, asr #3     ; encoding: [0x1f,0x0d,0x89,0x6b]
      104  ; CHECK: cmp x8, w8, uxtw       ; encoding: [0x1f,0x41,0x28,0xeb]
      105  ; CHECK: cmp w9, w8, uxtw       ; encoding: [0x3f,0x41,0x28,0x6b]
D | arm64-arithmetic-encoding.s |
      346  cmp wsp, w8, uxtw
      347  subs wzr, wsp, w8, uxtw
      348  cmp sp, w8, uxtw
      349  subs xzr, sp, w8, uxtw
      353  ; CHECK: cmp wsp, w8            ; encoding: [0xff,0x43,0x28,0x6b]
      354  ; CHECK: cmp wsp, w8            ; encoding: [0xff,0x43,0x28,0x6b]
      355  ; CHECK: cmp sp, w8, uxtw       ; encoding: [0xff,0x43,0x28,0xeb]
      356  ; CHECK: cmp sp, w8, uxtw       ; encoding: [0xff,0x43,0x28,0xeb]
      358  sub wsp, w9, w8, uxtw
      359  sub w1, wsp, w8, uxtw
      [all …]
/external/libhevc/common/arm64/ |
D | ihevc_sao_edge_offset_class3.s |
      89   LDR w8,[sp,#64]        //Loads ht
      253  csel w8,w20,w8,EQ
      256  mov v1.b[0], w8        //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      260  LDRB w8,[x5,#1]        //pu1_avail[1]
      261  mov v1.b[15], w8       //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      264  LDRB w8,[x5,#2]        //pu1_avail[2]
      290  LDRB w8,[x7]           //load the value and increment by src_strd
      293  STRB w8,[x5,#1]!       //store it in the stack pointer
      308  LDRB w8,[x8]
      311  mov v18.b[15], w8      //I vsetq_lane_u8
      [all …]
D | ihevc_sao_edge_offset_class2_chroma.s |
      100  mov w8, w25            //Loads ht
      349  csel w8,w20,w8,EQ
      353  mov v1.b[0], w8        //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      356  mov v1.b[1], w8        //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      359  LDRB w8,[x5,#1]        //pu1_avail[1]
      360  mov v1.b[14], w8       //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      361  mov v1.b[15], w8       //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      394  LDRH w8,[x7]           //load the value and increment by src_strd
      397  STRH w8,[x5],#2        //store it in the stack pointer
      445  …mov v17.b[0], w8      //I sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] …
      [all …]
D | ihevc_sao_edge_offset_class2.s |
      77   LDR w8,[sp,#16]        //Loads ht
      244  csel w8,w20,w8,EQ
      248  …mov v1.b[0], w8       //au1_mask = vsetq_lane_s8((-1||pu1_avail[0]), au1_mask, 0)
      251  LDRB w8,[x5,#1]        //pu1_avail[1]
      252  mov v1.b[15], w8       //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      283  LDRB w8,[x7]           //load the value and increment by src_strd
      285  STRB w8,[x5,#1]!       //store it in the stack pointer
      367  LDRB w8,[x11,#16]      //III pu1_src_cpy[src_strd + 16]
      376  …mov v18.b[0], w8      //III pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_str…
      561  csel w8,w20,w8,EQ
      [all …]
D | ihevc_sao_edge_offset_class3_chroma.s |
      99   mov w8, w25            //Loads ht
      339  csel w8,w20,w8,EQ
      343  mov v1.b[0], w8        //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      347  mov v1.b[1], w8        //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      350  LDRB w8,[x5,#1]        //pu1_avail[1]
      351  mov v1.b[14], w8       //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      352  mov v1.b[15], w8       //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      385  LDRH w8,[x7]           //load the value and increment by src_strd
      388  STRH w8,[x5],#2        //store it in the stack pointer
      412  LDRB w8,[x0,#14]       //I pu1_src_cpy[14]
      [all …]
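All four SAO edge-offset kernels above open the same way: build a 16-lane availability mask, defaulting to all ones and patching the border lanes from pu1_avail[]. A hedged sketch using the NEON intrinsics the comments name; the csel in the listing (which forces −1 for interior blocks) is simplified away, and the exact flag values are illustrative:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch of the au1_mask setup described by the comments above.
       The chroma variants patch lanes 0/1 and 14/15 in pairs because
       Cb and Cr samples are interleaved. */
    static int8x16_t sao_edge_mask(const uint8_t *pu1_avail)
    {
        int8x16_t au1_mask = vdupq_n_s8(-1);                       /* all lanes usable  */
        au1_mask = vsetq_lane_s8((int8_t)pu1_avail[0], au1_mask, 0);   /* left edge  */
        au1_mask = vsetq_lane_s8((int8_t)pu1_avail[1], au1_mask, 15);  /* right edge */
        return au1_mask;
    }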
/external/libavc/common/armv8/ |
D | ih264_intra_pred_luma_16x16_av8.s |
      449  ldrb w8, [x7], #-1
      452  sub w12, w8, w9
      453  ldrb w8, [x7], #-1
      456  sub w8, w8, w9
      458  add w12, w12, w8, lsl #1
      460  ldrb w8, [x7], #-1
      463  sub w8, w8, w9
      465  add w8, w8, w8, lsl #1
      467  add w12, w12, w8
      473  ldrb w8, [x7], #-1
      [all …]
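The ldrb/sub/add chain walks border pixels backwards and accumulates a weighted sum of differences, building the ×2 and ×3 weights by shift-and-add (add w12, w12, w8, lsl #1 is the ×2 term; add w8, w8, w8, lsl #1 builds ×3). A hedged C sketch of the accumulation, assuming this is the plane-mode gradient term of 16x16 intra prediction; the pointer walk and exact indexing are simplified:

    #include <stdint.h>

    /* Weighted sum of differences of mirrored border pixels p and q,
       sum over i of i * (p[i] - q[i]); w12 plays the role of acc. */
    static int32_t plane_gradient(const uint8_t *p, const uint8_t *q)
    {
        int32_t acc = 0;
        for (int i = 1; i <= 8; i++)
            acc += i * ((int32_t)p[i] - (int32_t)q[i]);
        return acc;
    }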
D | ih264_padding_neon_av8.s |
      185  ldrb w8, [x0]
      189  dup v0.16b, w8
      200  ldrb w8, [x0]
      205  dup v0.16b, w8
      222  ldrb w8, [x0]
      226  dup v0.16b, w8
      240  ldrb w8, [x0]
      243  dup v0.16b, w8
      343  ldrh w8, [x0]
      347  dup v0.8h, w8
      [all …]
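Each padding loop above is a broadcast-and-store: load one border sample (ldrb/ldrh), duplicate it across a vector (dup v0.16b / dup v0.8h), and store it over the pad region. A minimal C sketch of the byte case, assuming the destination has pad_len writable bytes before the row and pad_len is a multiple of 16:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Replicate the leftmost pixel of a row across the left padding,
       one 16-byte store at a time (ldrb w8 / dup v0.16b, w8 above). */
    static void pad_row_left(uint8_t *row, int pad_len)
    {
        uint8x16_t border = vdupq_n_u8(row[0]);
        for (int i = 1; i <= pad_len / 16; i++)
            vst1q_u8(row - 16 * i, border);   /* fill bytes left of the row */
    }

The ldrh/dup v0.8h hit at line 343 is the same idea for 16-bit samples.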
/external/llvm/test/MC/Mips/msa/ |
D | test_3rf.s |
      8   # CHECK: fceq.d $w0, $w8, $w16       # encoding: [0x78,0xb0,0x40,0x1a]
      11  # CHECK: fclt.w $w28, $w8, $w8       # encoding: [0x79,0x08,0x47,0x1a]
      22  # CHECK: fcult.d $w27, $w8, $w17     # encoding: [0x79,0x71,0x46,0xda]
      23  # CHECK: fcun.w $w4, $w20, $w8       # encoding: [0x78,0x48,0xa1,0x1a]
      29  # CHECK: fexdo.h $w8, $w0, $w16      # encoding: [0x7a,0x10,0x02,0x1b]
      36  # CHECK: fmax.d $w26, $w18, $w8      # encoding: [0x7b,0xa8,0x96,0x9b]
      44  # CHECK: fmsub.d $w8, $w18, $w16     # encoding: [0x79,0x70,0x92,0x1b]
      75  # CHECK: maddr_q.h $w8, $w18, $w9    # encoding: [0x7b,0x49,0x92,0x1c]
      91  fceq.d $w0, $w8, $w16
      94  fclt.w $w28, $w8, $w8
      [all …]
D | test_elm.s |
      3   copy_s.b $13, $w8[2]    # CHECK: copy_s.b $13, $w8[2]   # encoding: [0x78,0x82,0x43,0x59]
      9   sldi.h $w8, $w17[0]     # CHECK: sldi.h $w8, $w17[0]    # encoding: [0x78,0x20,0x8a,0x19]
/external/capstone/suite/MC/Mips/ |
D | test_3rf.s.cs |
      7   0x78,0xb0,0x40,0x1a = fceq.d $w0, $w8, $w16
      10  0x79,0x08,0x47,0x1a = fclt.w $w28, $w8, $w8
      21  0x79,0x71,0x46,0xda = fcult.d $w27, $w8, $w17
      22  0x78,0x48,0xa1,0x1a = fcun.w $w4, $w20, $w8
      28  0x7a,0x10,0x02,0x1b = fexdo.h $w8, $w0, $w16
      35  0x7b,0xa8,0x96,0x9b = fmax.d $w26, $w18, $w8
      43  0x79,0x70,0x92,0x1b = fmsub.d $w8, $w18, $w16
      74  0x7b,0x49,0x92,0x1c = maddr_q.h $w8, $w18, $w9
D | test_3r.s.cs |
      80   0x79,0x50,0x45,0xcf = clt_s.w $w23, $w8, $w16
      104  0x79,0xd0,0x46,0x13 = dpadd_u.w $w24, $w8, $w16
      147  0x7b,0x7f,0x42,0x0e = max_a.d $w8, $w8, $w31
      161  0x7a,0x28,0xae,0xce = min_s.h $w27, $w21, $w8
      163  0x7a,0x75,0x41,0x8e = min_s.d $w6, $w8, $w21
      164  0x7a,0x88,0xd5,0x8e = min_u.b $w22, $w26, $w8
      166  0x7a,0xce,0xa2,0x0e = min_u.w $w8, $w20, $w14
      173  0x7b,0xa7,0x46,0x12 = mod_u.h $w24, $w8, $w7
      189  0x79,0xa8,0x2e,0x94 = pckod.h $w26, $w5, $w8
      194  0x78,0x49,0x45,0x14 = sld.w $w20, $w8[$9]
      [all …]
/external/boringssl/ios-aarch64/crypto/fipsmodule/ |
D | sha256-armv8.S |
      141  eor w8,w22,w22,ror#14
      147  eor w16,w16,w8,ror#11      // Sigma1(e)
      148  ror w8,w26,#2
      155  eor w17,w8,w17,ror#13      // Sigma0(a)
      162  ldp w7,w8,[x1],#2*4
      209  rev w8,w8                  // 5
      218  add w22,w22,w8             // h+=X[i]
      411  str w8,[sp,#4]
      414  eor w8,w27,w27,ror#14
      420  eor w16,w16,w8,ror#11      // Sigma1(e)
      [all …]
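The eor/ror pairs compute the FIPS 180-4 big sigmas with one rotate saved each: ror(x ^ ror(x,14), 11) equals ror(x,11) ^ ror(x,25), and ror(x ^ ror(x,9), 13) equals ror(x,13) ^ ror(x,22). A C sketch of the values being formed (the linux-aarch64 listing below is the same generated file, offset by one line):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* Sigma1(e) = ror(e,6) ^ ror(e,11) ^ ror(e,25); the factored form
       matches eor w8,w22,w22,ror#14 followed by eor w16,w16,w8,ror#11. */
    static inline uint32_t Sigma1(uint32_t e)
    {
        return ror32(e, 6) ^ ror32(e ^ ror32(e, 14), 11);
    }

    /* Sigma0(a) = ror(a,2) ^ ror(a,13) ^ ror(a,22); likewise the
       ror w8,w26,#2 / eor w17,w8,w17,ror#13 pair above. */
    static inline uint32_t Sigma0(uint32_t a)
    {
        return ror32(a, 2) ^ ror32(a ^ ror32(a, 9), 13);
    }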
D | sha1-armv8.S |
      105  add w24,w24,w8      // future e+=X[i]
      261  eor w6,w6,w8
      289  eor w8,w8,w10
      293  eor w8,w8,w16
      297  eor w8,w8,w5
      301  ror w8,w8,#31
      311  add w23,w23,w8      // future e+=X[i]
      334  eor w11,w11,w8
      390  eor w16,w16,w8
      458  eor w6,w6,w8
      [all …]
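The eor/eor/eor/ror#31 runs are SHA-1's message-schedule update: ror by 31 is a rotate left by 1, and each new word is the rotated xor of four earlier ones. A C sketch over a 16-word circular buffer:

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* W[t] = rol1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]), kept in a
       16-word ring; the three eors and the ror#31 above compute this. */
    static uint32_t sha1_schedule(const uint32_t W[16], int t)   /* t >= 16 */
    {
        return rol32(W[(t - 3) & 15] ^ W[(t - 8) & 15] ^
                     W[(t - 14) & 15] ^ W[(t - 16) & 15], 1);
    }

The "future e+=X[i]" hits add the freshly computed schedule word into the next round's accumulator one round early, hiding the dependency.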
/external/boringssl/linux-aarch64/crypto/fipsmodule/ |
D | sha256-armv8.S |
      142  eor w8,w22,w22,ror#14
      148  eor w16,w16,w8,ror#11      // Sigma1(e)
      149  ror w8,w26,#2
      156  eor w17,w8,w17,ror#13      // Sigma0(a)
      163  ldp w7,w8,[x1],#2*4
      210  rev w8,w8                  // 5
      219  add w22,w22,w8             // h+=X[i]
      412  str w8,[sp,#4]
      415  eor w8,w27,w27,ror#14
      421  eor w16,w16,w8,ror#11      // Sigma1(e)
      [all …]
D | sha1-armv8.S |
      106  add w24,w24,w8      // future e+=X[i]
      262  eor w6,w6,w8
      290  eor w8,w8,w10
      294  eor w8,w8,w16
      298  eor w8,w8,w5
      302  ror w8,w8,#31
      312  add w23,w23,w8      // future e+=X[i]
      335  eor w11,w11,w8
      391  eor w16,w16,w8
      459  eor w6,w6,w8
      [all …]
/external/libmpeg2/common/armv8/ |
D | impeg2_format_conv.s |
      143  ldr w8, [sp, #96]     //// Load u2_dest_stride_y from stack
      144  uxtw x8, w8
      186  ldr w8, [sp, #104]    //// Load u2_dest_stride_uv from stack
      187  sxtw x8, w8
      316  ldr w8, [sp, #96]     //// Load u2_dest_stride_y from stack
      317  uxtw x8, w8
      359  ldr w8, [sp, #104]    //// Load u2_dest_stride_uv from stack
      360  sxtw x8, w8
/external/python/cpython2/Modules/ |
D | yuvconvert.c |
      85   int w8 = width / 8;  /* and so is one eighth */       in yuv_sv411_to_cl422dc_sixteenthsize() local
      89   struct yuv422 *out_odd = out_even + w8;               in yuv_sv411_to_cl422dc_sixteenthsize()
      94   for (j = w8; j--; ) {                                 in yuv_sv411_to_cl422dc_sixteenthsize()
      115  out_even += w8;                                       in yuv_sv411_to_cl422dc_sixteenthsize()
      116  out_odd += w8;                                        in yuv_sv411_to_cl422dc_sixteenthsize()