/external/libhevc/decoder/arm/ |
D | ihevcd_fmt_conv_420sp_to_rgba8888.s |
      118 VMOV.16 D0[0],R10 @//C1
      121 VMOV.16 D0[1],R10 @//C2
      124 VMOV.16 D0[2],R10 @//C3
      127 VMOV.16 D0[3],R10 @//C4
      176 @VMOV.I8 Q1,#128
      230 VMOV.I8 D17,#0
      240 VMOV.I8 D23,#0
      281 VMOV.I8 D17,#0
      291 VMOV.I8 D23,#0
      313 @VMOV.I8 Q1,#128
      [all …]
|
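The VMOV.16 D0[n],R10 matches above load the four colour-conversion coefficients into lanes of D0 one at a time. A minimal intrinsic sketch of the same pattern, with placeholder coefficient names; the actual values come from libhevc's conversion tables and are not shown here:

    #include <arm_neon.h>

    /* Illustrative only: fill an int16x4 lane by lane, as the
     * VMOV.16 D0[0..3],R10 instructions in the listing do. */
    static int16x4_t load_coeffs(int16_t c1, int16_t c2, int16_t c3, int16_t c4)
    {
        int16x4_t coeffs = vdup_n_s16(0);
        coeffs = vset_lane_s16(c1, coeffs, 0);  /* VMOV.16 D0[0],R10 */
        coeffs = vset_lane_s16(c2, coeffs, 1);  /* VMOV.16 D0[1],R10 */
        coeffs = vset_lane_s16(c3, coeffs, 2);  /* VMOV.16 D0[2],R10 */
        coeffs = vset_lane_s16(c4, coeffs, 3);  /* VMOV.16 D0[3],R10 */
        return coeffs;
    }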
/external/libhevc/common/arm/ |
D | ihevc_sao_edge_offset_class0_chroma.s |
      93 VMOV.I8 Q1,#2 @const_2 = vdupq_n_s8(2)
      97 VMOV.I16 Q2,#0 @const_min_clip = vdupq_n_s16(0)
      101 VMOV.I16 Q3,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      107 VMOV.S8 Q4,#0xFF @au1_mask = vdupq_n_s8(-1)
      137 VMOV.8 D8[0],r12 @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
      138 VMOV.8 D8[1],r12 @vsetq_lane_s8(pu1_avail[0], au1_mask, 1)
      143 VMOV.16 D8[0],r12 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      149 VMOV.8 D9[6],r12 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
      150 VMOV.8 D9[7],r12 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      164 …VMOV.16 D15[3],r11 @vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 1…
      [all …]
|
D | ihevc_sao_edge_offset_class3_chroma.s |
      285 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2)
      286 VMOV.I16 Q1,#0 @const_min_clip = vdupq_n_s16(0)
      287 VMOV.I16 Q2,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      295 VMOV.S8 Q4,#0xFF @au1_mask = vdupq_n_s8(-1)
      311 VMOV.8 D8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      315 VMOV.8 D8[1],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      319 VMOV.8 D9[6],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      320 VMOV.8 D9[7],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      330 VMOV.I8 Q9,#0
      361 VMOV.I8 Q9,#0 @I
      [all …]
|
D | ihevc_sao_edge_offset_class2_chroma.s |
      273 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2)
      277 VMOV.I16 Q1,#0 @const_min_clip = vdupq_n_s16(0)
      281 VMOV.I16 Q2,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      303 VMOV.S8 Q4,#0xFF @au1_mask = vdupq_n_s8(-1)
      317 VMOV.8 D8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      320 VMOV.8 D8[1],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      324 VMOV.8 D9[6],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      325 VMOV.8 D9[7],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      374 VMOV.I8 Q9,#0
      378 …VMOV.16 D18[0],r5 @I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd +…
      [all …]
|
D | ihevc_sao_edge_offset_class0.s |
      88 VMOV.I8 Q1,#2 @const_2 = vdupq_n_s8(2)
      92 VMOV.I16 Q2,#0 @const_min_clip = vdupq_n_s16(0)
      96 VMOV.I16 Q3,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      102 VMOV.S8 Q4,#0xFF @au1_mask = vdupq_n_s8(-1)
      132 VMOV.8 D8[0],r12 @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
      137 VMOV.8 D8[0],r12 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      143 VMOV.8 D9[7],r12 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      157 …VMOV.8 D15[7],r11 @vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
      171 …VMOV.8 D29[7],r11 @II Iteration vsetq_lane_u8(pu1_src_left[ht - row], pu1_cu…
      183 …VMOV.8 D14[0],r11 @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_…
      [all …]
|
D | ihevc_sao_edge_offset_class2.s |
      190 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2)
      194 VMOV.I16 Q1,#0 @const_min_clip = vdupq_n_s16(0)
      198 VMOV.I16 Q2,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      212 VMOV.S8 Q4,#0xFF @au1_mask = vdupq_n_s8(-1)
      230 … VMOV.8 d8[0],r8 @au1_mask = vsetq_lane_s8((-1||pu1_avail[0]), au1_mask, 0)
      234 VMOV.8 d9[7],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      274 VMOV.I8 Q9,#0
      284 …VMOV.8 D18[0],r5 @I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd +…
      299 …VMOV.8 D14[0],r4 @I sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] -…
      326 VMOV Q6,Q8 @I pu1_cur_row = pu1_next_row
      [all …]
|
D | ihevc_sao_edge_offset_class3.s |
      202 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2)
      206 VMOV.I16 Q1,#0 @const_min_clip = vdupq_n_s16(0)
      210 VMOV.I16 Q2,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      220 VMOV.S8 Q4,#0xFF @au1_mask = vdupq_n_s8(-1)
      239 VMOV.8 d8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
      244 VMOV.8 d9[7],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
      281 VMOV.I8 Q9,#0
      298 VMOV.8 D19[7],r8 @I vsetq_lane_u8
      314 …VMOV.8 D15[7],r8 @I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_…
      341 VMOV Q6,Q8
      [all …]
|
D | ihevc_sao_edge_offset_class1.s |
      115 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2)
      116 VMOV.I16 Q1,#0 @const_min_clip = vdupq_n_s16(0)
      117 VMOV.I16 Q2,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      199 VMOV Q5,Q15 @II pu1_cur_row = pu1_next_row
      264 VMOV Q5,Q9 @pu1_cur_row = pu1_next_row
      339 VMOV Q5,Q15 @II pu1_cur_row = pu1_next_row
|
D | ihevc_sao_edge_offset_class1_chroma.s |
      118 VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2)
      119 VMOV.I16 Q1,#0 @const_min_clip = vdupq_n_s16(0)
      120 VMOV.I16 Q2,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
      206 VMOV Q5,Q15 @II pu1_cur_row = pu1_next_row
      281 VMOV Q5,Q9 @pu1_cur_row = pu1_next_row
      369 VMOV Q5,Q15 @II pu1_cur_row = pu1_next_row
|
D | ihevc_sao_band_offset_chroma.s |
      150 VMOV.I8 D30,#16 @vdup_n_u8(16)
      233 VMOV.I8 D29,#16 @vdup_n_u8(16)
|
D | ihevc_sao_band_offset_luma.s | 139 VMOV.I8 D29,#16 @vdup_n_u8(16)
|
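The SAO files above share the same VMOV idioms: VMOV.In Qx,#imm broadcasts a constant (the vdupq_n_* calls named in the assembly comments) and VMOV.8 Dn[i],rN writes a single lane (vsetq_lane_*). A rough C sketch of that set-up using the names from the comments; it is illustrative, not the libhevc implementation, and bit_depth is assumed to be 8:

    #include <arm_neon.h>

    static void sao_constants(const unsigned char *pu1_avail)
    {
        int8x16_t  const_2        = vdupq_n_s8(2);              /* VMOV.I8  Qx,#2    */
        int16x8_t  const_min_clip = vdupq_n_s16(0);             /* VMOV.I16 Qx,#0    */
        uint16x8_t const_max_clip = vdupq_n_u16((1 << 8) - 1);  /* VMOV.I16 Qx,#255  */
        int8x16_t  au1_mask       = vdupq_n_s8(-1);             /* VMOV.S8  Qx,#0xFF */

        /* Lane writes correspond to the VMOV.8 D8[0] / D9[7] instructions. */
        au1_mask = vsetq_lane_s8((int8_t)pu1_avail[0], au1_mask, 0);
        au1_mask = vsetq_lane_s8((int8_t)pu1_avail[1], au1_mask, 15);

        (void)const_2; (void)const_min_clip; (void)const_max_clip; (void)au1_mask;
    }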
/external/arm-neon-tests/ |
D | ref-rvct-neon.txt |
      240 VMOV/VMOVQ output:
      241 VMOV/VMOVQ:0:result_int8x8 [] = { fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffff…
      242 VMOV/VMOVQ:1:result_int16x4 [] = { fffffff0, fffffff0, fffffff0, fffffff0, }
      243 VMOV/VMOVQ:2:result_int32x2 [] = { fffffff0, fffffff0, }
      244 VMOV/VMOVQ:3:result_int64x1 [] = { fffffffffffffff0, }
      245 VMOV/VMOVQ:4:result_uint8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
      246 VMOV/VMOVQ:5:result_uint16x4 [] = { fff0, fff0, fff0, fff0, }
      247 VMOV/VMOVQ:6:result_uint32x2 [] = { fffffff0, fffffff0, }
      248 VMOV/VMOVQ:7:result_uint64x1 [] = { fffffffffffffff0, }
      249 VMOV/VMOVQ:8:result_poly8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
      [all …]
|
D | ref-rvct-neon-nofp16.txt |
      222 VMOV/VMOVQ output:
      223 VMOV/VMOVQ:0:result_int8x8 [] = { fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffff…
      224 VMOV/VMOVQ:1:result_int16x4 [] = { fffffff0, fffffff0, fffffff0, fffffff0, }
      225 VMOV/VMOVQ:2:result_int32x2 [] = { fffffff0, fffffff0, }
      226 VMOV/VMOVQ:3:result_int64x1 [] = { fffffffffffffff0, }
      227 VMOV/VMOVQ:4:result_uint8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
      228 VMOV/VMOVQ:5:result_uint16x4 [] = { fff0, fff0, fff0, fff0, }
      229 VMOV/VMOVQ:6:result_uint32x2 [] = { fffffff0, fffffff0, }
      230 VMOV/VMOVQ:7:result_uint64x1 [] = { fffffffffffffff0, }
      231 VMOV/VMOVQ:8:result_poly8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
      [all …]
|
D | ref-rvct-all.txt |
      240 VMOV/VMOVQ output:
      241 VMOV/VMOVQ:0:result_int8x8 [] = { fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffff…
      242 VMOV/VMOVQ:1:result_int16x4 [] = { fffffff0, fffffff0, fffffff0, fffffff0, }
      243 VMOV/VMOVQ:2:result_int32x2 [] = { fffffff0, fffffff0, }
      244 VMOV/VMOVQ:3:result_int64x1 [] = { fffffffffffffff0, }
      245 VMOV/VMOVQ:4:result_uint8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
      246 VMOV/VMOVQ:5:result_uint16x4 [] = { fff0, fff0, fff0, fff0, }
      247 VMOV/VMOVQ:6:result_uint32x2 [] = { fffffff0, fffffff0, }
      248 VMOV/VMOVQ:7:result_uint64x1 [] = { fffffffffffffff0, }
      249 VMOV/VMOVQ:8:result_poly8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
      [all …]
|
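The reference vectors above are consistent with every lane being set to -16 (0xf0) by a vmov_n_*/vdup_n_* call and then printed element by element, with signed lanes shown sign-extended. A small sketch that reproduces the value pattern; the printf format is a guess at the reference-file layout, not taken from the test harness:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        int8x8_t  v_s8  = vmov_n_s8(-16);    /* each lane prints sign-extended: fffffff0 */
        int16x4_t v_s16 = vmov_n_s16(-16);   /* fffffff0 */
        uint8x8_t v_u8  = vmov_n_u8(0xf0);   /* f0 */

        printf("%x %x %x\n",
               (unsigned)vget_lane_s8(v_s8, 0),
               (unsigned)vget_lane_s16(v_s16, 0),
               (unsigned)vget_lane_u8(v_u8, 0));
        return 0;
    }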
/external/llvm/test/MC/Disassembler/ARM/ |
D | fullfp16-neon-arm.txt |
      268 # Existing VMOV(immediate, Advanced SIMD) instructions within the encoding
      272 # 2 -- VMOV op
|
D | fullfp16-neon-thumb.txt |
      268 # Existing VMOV(immediate, Advanced SIMD) instructions within the encoding
      274 # 2 -- VMOV op
|
D | invalid-armv7.txt |
      391 # VMOV cmode=0b1111 op=1 is UNDEFINED
      396 # VMOV cmode=0b1111 op=1 is UNDEFINED
|
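Both matches above pin down the same corner of the Advanced SIMD VMOV (immediate) encoding: cmode=0b1111 combined with op=1 is UNDEFINED, so the disassembler must reject it. A minimal sketch of that check over already-decoded fields; extraction of cmode/op from the instruction word is omitted:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true for the encoding the tests above flag as UNDEFINED. */
    static bool vmov_imm_is_undefined(uint8_t cmode, uint8_t op)
    {
        return cmode == 0xF && op == 1;  /* cmode=0b1111, op=1 */
    }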
/external/llvm/test/CodeGen/ARM/ |
D | domain-conv-vmovs.ll | 106 ; + Convince LLVM to emit a VMOV to S0
|
/external/libopus/celt/arm/ |
D | celt_pitch_xcorr_arm_gnu.s |
      84 @ Unlike VMOV, VAND is a data processing instruction (and doesn't get
      176 VMOV.S32 q15, #1
|
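The second match above, VMOV.S32 q15,#1, broadcasts the immediate 1 across a Q register; in intrinsics this is a plain vdupq_n_s32. Illustrative sketch only:

    #include <arm_neon.h>

    static int32x4_t ones_vector(void)
    {
        return vdupq_n_s32(1);  /* VMOV.S32 qN, #1 */
    }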
/external/pcre/dist2/src/sljit/ |
D | sljitNativeARM_T2_32.c |
      177 #define VMOV 0xee000a10 macro
      1644 return push_inst32(compiler, VMOV | (1 << 20) | RT4(dst) | DN4(TMP_FREG1)); in sljit_emit_fop1_conv_sw_from_f64()
      1657 FAIL_IF(push_inst32(compiler, VMOV | RT4(src) | DN4(TMP_FREG1))); in sljit_emit_fop1_conv_f64_from_sw()
      1664 FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | DN4(TMP_FREG1))); in sljit_emit_fop1_conv_f64_from_sw()
|
D | sljitNativeARM_32.c |
      110 #define VMOV 0xee000a10 macro
      2122 return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | (TMP_FREG1 << 16)); in sljit_emit_fop1_conv_sw_from_f64()
      2135 FAIL_IF(push_inst(compiler, VMOV | RD(src) | (TMP_FREG1 << 16))); in sljit_emit_fop1_conv_f64_from_sw()
      2142 FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (TMP_FREG1 << 16))); in sljit_emit_fop1_conv_f64_from_sw()
|
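Both sljit back ends above define VMOV as the base opcode 0xee000a10 for a move between one ARM core register and a single-precision VFP register, then OR the register fields into it; the (1 << 20) in the conv_sw_from_f64 paths selects the VFP-to-core direction. A hedged sketch of that composition: only the base opcode and the direction bit come from the listing, the helper name is invented, and the field positions follow the ARM encoding as commonly documented:

    #include <stdint.h>

    #define VMOV_BASE   0xee000a10u   /* as in the sljit sources above */
    #define TO_CORE_REG (1u << 20)    /* direction bit: VFP register -> core register */

    /* Hypothetical helper mirroring push_inst(compiler, VMOV | ...):
     * rt is the ARM core register, sn the single-precision VFP register. */
    static uint32_t encode_vmov_core_single(unsigned rt, unsigned sn, int to_core)
    {
        uint32_t ins = VMOV_BASE
                     | ((uint32_t)rt << 12)          /* Rt field       */
                     | ((uint32_t)(sn >> 1) << 16)   /* Vn[4:1]        */
                     | ((uint32_t)(sn & 1) << 7);    /* Vn[0] -> N bit */
        return to_core ? (ins | TO_CORE_REG) : ins;
    }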
/external/llvm/lib/Target/ARM/ |
D | ARM.td |
      119 "Has slow VGETLNi32 - prefer VMOV">;
      123 "Has slow VDUP32 - prefer VMOV">;
|
D | ARMScheduleSwift.td | 615 (instregex "VMOVv", "VMOV(S|D)$", "VMOV(S|D)cc",
|
/external/valgrind/none/tests/arm/ |
D | vfp.stdout.exp |
      51 ---- VMOV (ARM core register to scalar) ----
      76 ---- VMOV (scalar to ARM core register) ----
      590 ---- VMOV (register) ----
      1032 ----- VMOV (immediate) -----
      1043 ----- VMOV (ARM core register and single register) -----
      1068 ----- VMOV (ARM two core registers and two single registers) -----
      1161 ----- VMOV (ARM two core registers and double register) -----
|
/external/llvm/test/MC/ARM/ |
D | simple-fp-encoding.s | 326 @ VMOV w/ optional data type suffix.
|