/external/llvm/test/CodeGen/AArch64/ |
D | addsub-shifted.ll |
    29  %shift4 = shl i32 %rhs4, 19
    30  %val4 = sub i32 %shift4, %lhs32
    94  %shift4 = lshr i32 %rhs32, 19
    95  %val4 = sub i32 %shift4, %lhs32
    153 %shift4 = ashr i32 %rhs32, 19
    154 %val4 = sub i32 %shift4, %lhs32
    217 %shift4 = shl i64 %rhs64, 43
    218 %tst4 = icmp uge i64 %lhs64, %shift4
    272 %shift4 = shl i64 %rhs64, 43
    273 %val4 = sub i64 0, %shift4
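Note: these hits exercise patterns where a shl/lshr/ashr feeds a sub, cmp, or neg; the test checks which ones fold into AArch64's shifted-register operand forms (lsl/lsr/asr modifiers), which only apply to the second operand. A minimal C sketch of the pattern (function names are mine, not from the test):

    #include <stdint.h>
    /* On AArch64, the shift can fold into the subtract's second operand,
       e.g. a single "sub w0, w0, w1, lsl #19" instead of a separate shift. */
    uint32_t fold_shift_into_sub(uint32_t lhs, uint32_t rhs) {
      return lhs - (rhs << 19);
    }
    /* "sub i64 0, %shift4" is the negate form; it can lower to
       "neg x0, x0, lsl #43". */
    uint64_t fold_shift_into_neg(uint64_t rhs) {
      return (uint64_t)0 - (rhs << 43);
    }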
|
D | bitfield.ll |
    98  %shift4 = ashr i64 %val64, 31
    99  store volatile i64 %shift4, i64* @var64
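Note: this pair just checks that a 31-bit arithmetic shift right of a 64-bit value selects a plain asr. C equivalent (name is mine):

    #include <stdint.h>
    /* Arithmetic shift right by 31; on AArch64 a single "asr x0, x0, #31".
       Right-shifting a negative value is implementation-defined in C but
       arithmetic on this target. */
    int64_t ashr31(int64_t val64) { return val64 >> 31; }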
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | addsub-shifted.ll |
    32  %shift4 = shl i32 %rhs4, 19
    33  %val4 = sub i32 %shift4, %lhs32
    97  %shift4 = lshr i32 %rhs32, 19
    98  %val4 = sub i32 %shift4, %lhs32
    156 %shift4 = ashr i32 %rhs32, 19
    157 %val4 = sub i32 %shift4, %lhs32
    220 %shift4 = shl i64 %rhs64, 43
    221 %tst4 = icmp uge i64 %lhs64, %shift4
    275 %shift4 = shl i64 %rhs64, 43
    276 %val4 = sub i64 0, %shift4
|
D | bitfield.ll |
    98  %shift4 = ashr i64 %val64, 31
    99  store volatile i64 %shift4, i64* @var64
|
/external/libgav1/libgav1/src/dsp/x86/ |
D | mask_blend_sse4.cc |
    509 const __m128i& shift4, uint16_t* dst, const ptrdiff_t dst_stride) {  in WriteMaskBlendLine10bpp4x2_SSE4_1() argument
    538 RightShiftWithRoundingConst_S32(sub_0, kRoundBitsMaskBlend, shift4);  in WriteMaskBlendLine10bpp4x2_SSE4_1()
    540 RightShiftWithRoundingConst_S32(sub_1, kRoundBitsMaskBlend, shift4);  in WriteMaskBlendLine10bpp4x2_SSE4_1()
    555 const __m128i shift4 = _mm_set1_epi32((1 << kRoundBitsMaskBlend) >> 1);  in MaskBlend10bpp4x4_SSE4_1() local
    562 pred_mask_1, offset, max, shift4, dst,  in MaskBlend10bpp4x4_SSE4_1()
    573 pred_mask_1, offset, max, shift4, dst,  in MaskBlend10bpp4x4_SSE4_1()
    599 const __m128i shift4 = _mm_set1_epi32((1 << kRoundBitsMaskBlend) >> 1);  in MaskBlend10bpp4xH_SSE4_1() local
    608 shift4, dst, dst_stride);  in MaskBlend10bpp4xH_SSE4_1()
    619 shift4, dst, dst_stride);  in MaskBlend10bpp4xH_SSE4_1()
    630 shift4, dst, dst_stride);  in MaskBlend10bpp4xH_SSE4_1()
    [all …]
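Note: despite the name, shift4 here is not a shift amount; it is the rounding bias (1 << kRoundBitsMaskBlend) >> 1, half the divisor, splatted into all four i32 lanes and added before the right shift so the blend rounds to nearest. A scalar sketch, assuming RightShiftWithRoundingConst_S32 applies the precomputed bias like this (its real definition lives elsewhere in libgav1):

    #include <stdint.h>
    /* Scalar model of the SIMD round-and-shift: add half the divisor,
       then shift. The "shift4" vector is a splat of this bias. */
    static int32_t right_shift_with_rounding_s32(int32_t x, int bits) {
      const int32_t bias = (1 << bits) >> 1;
      return (x + bias) >> bits;
    }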
|
/external/llvm/test/Transforms/InstSimplify/ |
D | exact-nsw-nuw.ll |
    30  ; CHECK-LABEL: @shift4(
    32  define i32 @shift4(i32 %A, i32 %B) {
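Note: the body of @shift4 is not shown in the hits, but this file tests InstSimplify's exact/nsw/nuw shift folds, where flags guaranteeing no bits were discarded let a shift pair cancel. An illustration of that class of identity (values are mine):

    #include <assert.h>
    /* If the low 4 bits of x are known zero, the right shift is "exact"
       in LLVM terms, so (x >> 4) << 4 == x. */
    int main(void) {
      unsigned x = 0x1234u << 4;
      assert(((x >> 4) << 4) == x);
      return 0;
    }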
|
/external/llvm-project/llvm/test/Transforms/InstSimplify/ |
D | exact-nsw-nuw.ll |
    34  define i32 @shift4(i32 %A, i32 %B) {
    35  ; CHECK-LABEL: @shift4(
|
/external/llvm/test/Transforms/SROA/ |
D | big-endian.ll |
    108 ; CHECK-NEXT: %[[shift4:.*]] = shl i56 %[[ext4]], 40
    110 ; CHECK-NEXT: %[[insert4:.*]] = or i56 %[[mask4]], %[[shift4]]
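Note: these CHECK lines match SROA rebuilding a wide integer on a big-endian target: a field is extended, shifted to bit 40 of the i56, and ORed over the masked base. Roughly, in C (64-bit stands in for i56; the mask width is illustrative):

    #include <stdint.h>
    /* Model of the shl+or insert the test expects: clear the lane at
       bit 40, then OR in the shifted field. */
    static uint64_t insert_at_bit40(uint64_t base, uint64_t field,
                                    uint64_t lane_mask) {
      uint64_t shift4 = field << 40;               /* %[[shift4]] */
      uint64_t mask4 = base & ~(lane_mask << 40);  /* %[[mask4]]  */
      return mask4 | shift4;                       /* %[[insert4]] */
    }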
|
/external/libhevc/common/arm/ |
D | ihevc_itrans_recon_32x32.s |
    1215 bhs shift4
    1249 bhs shift4
    1286 bhs shift4
    1321 bhs shift4
    1458 shift4:  label
|
/external/aac/libSBRdec/src/ |
D | hbe.cpp |
    1761 int shift4 = fMin(fMax(temp_e, mVal_e) - mVal_e, 31);  in QmfTransposerApply() local
    1763 if ((temp_F >> shift3) > (mVal_F >> shift4)) {  in QmfTransposerApply()
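Note: this compares two values kept in mantissa/exponent (block-floating-point) form; each mantissa is shifted right to the common, larger exponent before the compare, with the shift clamped to 31 so it stays defined. A scalar sketch following the hit (shift3's definition is not in the hits and is assumed symmetric; fMin/fMax are FDK's min/max helpers):

    #include <stdint.h>
    /* Compare temp_F * 2^temp_e against mVal_F * 2^mVal_e by aligning
       both mantissas to max(temp_e, mVal_e), clamping shifts to 31. */
    static int block_float_greater(int32_t temp_F, int temp_e,
                                   int32_t mVal_F, int mVal_e) {
      int common_e = temp_e > mVal_e ? temp_e : mVal_e;  /* fMax */
      int shift3 = common_e - temp_e;  if (shift3 > 31) shift3 = 31;
      int shift4 = common_e - mVal_e;  if (shift4 > 31) shift4 = 31;
      return (temp_F >> shift3) > (mVal_F >> shift4);
    }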
|
/external/llvm/test/CodeGen/SystemZ/ |
D | asm-18.ll |
    523 %shift4 = shl i32 %res1, 5
    524 %and4 = and i32 %shift4, 128
|
/external/llvm-project/llvm/test/CodeGen/SystemZ/ |
D | asm-18.ll |
    527 %shift4 = shl i32 %res1, 5
    528 %and4 = and i32 %shift4, 128
|
/external/libhevc/common/arm64/ |
D | ihevc_itrans_recon_32x32.s |
    1272 bhs shift4
    1306 bhs shift4
    1343 bhs shift4
    1378 bhs shift4
    1515 shift4:  label
|
/external/libvpx/libvpx/vpx_dsp/ppc/ |
D | inv_txfm_vsx.c |
    149 uint16x8_t shift4 = vec_splat_u16(4);
    151 #define PIXEL_ADD4(out, in) out = vec_sra(vec_add(in, add8), shift4);
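Note: here shift4 genuinely is a shift amount: PIXEL_ADD4 computes (in + 8) >> 4 per lane (vec_add then vec_sra), the usual round-to-nearest descale before adding inverse-transform residuals to pixels. Scalar model (assuming add8 is a splat of 8, which matches the rounding term for a shift of 4):

    #include <stdint.h>
    /* (in + 8) >> 4: add half of 16, then arithmetic shift right by 4. */
    static int16_t pixel_add4_scalar(int16_t in) {
      return (int16_t)((in + 8) >> 4);
    }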
|