/external/elfutils/libelf/ |
D | gnuhash_xlate.h |
    42  const Elf32_Word *src32 = src;  in elf_cvt_gnuhash() local
    49  dest32[cnt] = bswap_32 (src32[cnt]);  in elf_cvt_gnuhash()
    53  Elf32_Word bitmask_words = encode ? src32[2] : dest32[2];  in elf_cvt_gnuhash()
    57  const Elf64_Xword *src64 = (const Elf64_Xword *) &src32[4];  in elf_cvt_gnuhash()
    67  src32 = (const Elf32_Word *) &src64[bitmask_words];  in elf_cvt_gnuhash()
    71  *dest32++ = bswap_32 (*src32++);  in elf_cvt_gnuhash()
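These matches trace the width changes inside elf_cvt_gnuhash(): a .gnu.hash section starts with four 32-bit header words, follows with bitmask_words Bloom-filter words that are 64 bits wide in ELFCLASS64 files, and ends with 32-bit bucket and chain entries. Below is a minimal sketch of the same walk, assuming the ELFCLASS64 decode direction (foreign-endian source) and a length given in bytes; the function name is hypothetical and error handling is omitted.

    #include <byteswap.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical simplified analogue of elf_cvt_gnuhash (decode
       direction only; the real code also handles encoding, where the
       word count must be read from the already-native side).  */
    static void gnuhash_bswap64(uint32_t *dest32, const uint32_t *src32,
                                size_t len)
    {
      /* Header: nbuckets, symbias, bitmask_words, shift.  The source
         is foreign-endian, so swap the count before using it.  */
      uint32_t bitmask_words = bswap_32(src32[2]);
      for (int i = 0; i < 4; ++i)
        dest32[i] = bswap_32(src32[i]);

      /* Bloom filter: 64-bit words in ELFCLASS64 files.  */
      const uint64_t *src64 = (const uint64_t *) &src32[4];
      uint64_t *dest64 = (uint64_t *) &dest32[4];
      for (uint32_t i = 0; i < bitmask_words; ++i)
        dest64[i] = bswap_64(src64[i]);

      /* Buckets and chains: back to 32-bit words for the rest.  */
      src32 = (const uint32_t *) &src64[bitmask_words];
      dest32 = (uint32_t *) &dest64[bitmask_words];
      for (size_t n = len / 4 - 4 - 2 * bitmask_words; n > 0; --n)
        *dest32++ = bswap_32(*src32++);
    }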
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | fround.ll |
    12  @src32 = common global [16 x float] zeroinitializer, align 64
   884  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0), al…
   885  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1), al…
   886  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2), al…
   887  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 3), al…
   899  ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 …
   905  ; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 x …
   910  …%ld0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0…
   911  …%ld1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1…
   912  …%ld2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2…
   [all …]
|
D | bswap.ll |
    10  @src32 = common global [8 x i32] zeroinitializer, align 32
    83  ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*…
    88  %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
    89  %ld1 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
    90  %ld2 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 4
    91  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
   105  ; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*),…
   106  …, <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4) to …
   114  ; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([8 x i32]* @src32 to <8 x i32>*),…
   119  %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
   [all …]
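The SLPVectorizer tests in this directory all exercise the same shape: adjacent scalar loads from @src32, one intrinsic call per element, adjacent scalar stores, with FileCheck lines asserting that each target merges them into a single vector load and vector op. A hedged C analogue of the bswap case (the function name and the dst32 destination array are invented stand-ins), which clang's SLP vectorizer at -O2 is expected to collapse into one <4 x i32> load, a vector byte swap, and one vector store:

    #include <stdint.h>

    uint32_t src32[8];  /* mirrors @src32 = common global [8 x i32] */
    uint32_t dst32[8];  /* hypothetical destination array */

    void bswap_4x32(void)
    {
      /* Four adjacent scalar loads...  */
      uint32_t ld0 = src32[0], ld1 = src32[1], ld2 = src32[2], ld3 = src32[3];
      /* ...and four adjacent scalar stores of a per-lane intrinsic:
         exactly the pattern the SLP vectorizer packs into vectors.  */
      dst32[0] = __builtin_bswap32(ld0);
      dst32[1] = __builtin_bswap32(ld1);
      dst32[2] = __builtin_bswap32(ld2);
      dst32[3] = __builtin_bswap32(ld3);
    }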
|
D | ctpop.ll |
    10  @src32 = common global [8 x i32] zeroinitializer, align 32
    71  ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*…
    76  %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
    77  %ld1 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
    78  %ld2 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 4
    79  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
    93  ; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*),…
    94  …, <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4) to …
   102  ; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([8 x i32]* @src32 to <8 x i32>*),…
   107  %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
   [all …]
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | fround.ll |
    13  @src32 = common global [16 x float] zeroinitializer, align 64
   885  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0), al…
   886  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1), al…
   887  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2), al…
   888  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 3), al…
   900  ; SSE41-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 …
   906  ; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 x …
   911  …%ld0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0…
   912  …%ld1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1…
   913  …%ld2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2…
   [all …]
|
D | fabs.ll |
    12  @src32 = common global [16 x float] zeroinitializer, align 64
   130  ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 …
   135  …%a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0)…
   136  …%a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1)…
   137  …%a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2)…
   138  …%a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 3)…
   152  ; SSE-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 x …
   153  …at>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 4) to …
   161  ; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([16 x float]* @src32 to <8 x …
   166  …%a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0)…
   [all …]
|
D | sqrt.ll |
    10  @src32 = common global [16 x float] zeroinitializer, align 64
   128  ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 …
   133  …%a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0)…
   134  …%a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1)…
   135  …%a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2)…
   136  …%a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 3)…
   150  ; SSE-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([16 x float]* @src32 to <4 x …
   151  …at>* bitcast (float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 4) to …
   159  ; AVX-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([16 x float]* @src32 to <8 x …
   164  …%a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0)…
   [all …]
|
D | fptoui.ll |
    12  @src32 = common global [16 x float] zeroinitializer, align 64
   302  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0), al…
   303  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1), al…
   304  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2), al…
   305  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 3), al…
   306  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 4), al…
   307  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 5), al…
   308  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 6), al…
   309  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 7), al…
   329  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0), al…
   [all …]
|
D | fptosi.ll |
    12  @src32 = common global [16 x float] zeroinitializer, align 64
   257  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0), al…
   258  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 1), al…
   259  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 2), al…
   260  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 3), al…
   261  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 4), al…
   262  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 5), al…
   263  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 6), al…
   264  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 7), al…
   284  …= load float, float* getelementptr inbounds ([16 x float], [16 x float]* @src32, i32 0, i64 0), al…
   [all …]
|
D | ctpop.ll |
    11  @src32 = common global [8 x i32] zeroinitializer, align 32
   120  ; SSE2-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*)…
   126  ; SSE42-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32,…
   127  ; SSE42-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32,…
   128  ; SSE42-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32,…
   129  ; SSE42-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32,…
   141  ; AVX-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i…
   142  ; AVX-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i…
   143  ; AVX-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i…
   144  ; AVX-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i…
   [all …]
|
D | bswap.ll |
    10  @src32 = common global [8 x i32] zeroinitializer, align 32
    83  ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*…
    88  %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
    89  %ld1 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
    90  %ld2 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 4
    91  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
   105  ; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*),…
   106  …, <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4) to …
   114  ; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([8 x i32]* @src32 to <8 x i32>*),…
   119  %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
   [all …]
|
D | ctlz.ll |
    11  @src32 = common global [8 x i32] zeroinitializer, align 32
    79  ; SSE2-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    80  ; SSE2-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    81  ; SSE2-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    82  ; SSE2-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    94  ; SSE42-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*…
   100  ; AVX1-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*)…
   106  ; AVX2-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
   107  ; AVX2-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
   108  ; AVX2-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
   [all …]
|
D | cttz.ll |
    11  @src32 = common global [8 x i32] zeroinitializer, align 32
    79  ; SSE2-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    80  ; SSE2-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    81  ; SSE2-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    82  ; SSE2-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
    94  ; SSE42-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*…
   100  ; AVX1-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*)…
   106  ; AVX2-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
   107  ; AVX2-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
   108  ; AVX2-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, …
   [all …]
|
D | uitofp.ll |
    12  @src32 = common global [16 x i32] zeroinitializer, align 64
   130  ; SSE-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
   131  ; SSE-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
   139  …0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), al…
   140  …1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 1), al…
   148  ; AVX512-NEXT: [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* bitcast ([16 x i32]* @src32 to <2 x i32…
   154  ; AVX256DQ-NEXT: [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* bitcast ([16 x i32]* @src32 to <2 x i…
   159  …%ld0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), align…
   160  …%ld1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 1), align…
   170  ; SSE-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
   [all …]
|
D | sitofp.ll |
    12  @src32 = common global [16 x i32] zeroinitializer, align 64
   220  …0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), al…
   221  …1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 1), al…
   228  …%ld0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), align…
   229  …%ld1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 1), align…
   239  ; SSE-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
   240  ; SSE-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
   241  ; SSE-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
   242  ; SSE-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
   254  ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @src32 to <4 x i32>*)…
   [all …]
|
D | bitreverse.ll |
    12  @src32 = common global [8 x i32] zeroinitializer, align 32
    79  ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*…
    84  %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
    85  %ld1 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
    86  %ld2 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 4
    87  %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
   101  ; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*),…
   102  …, <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4) to …
   110  ; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([8 x i32]* @src32 to <8 x i32>*),…
   116  ; XOP-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([8 x i32]* @src32 to <8 x i32>*),…
   [all …]
|
/external/skqp/src/core/ |
D | SkConvertPixels.cpp |
   112  auto src32 = (const uint32_t*) src;  in convert_to_alpha8() local
   115  dst[x] = src32[x] >> 24;  in convert_to_alpha8()
   118  src32 = SkTAddOffset<const uint32_t>(src32, srcRB);  in convert_to_alpha8()
   124  auto src32 = (const uint32_t*) src;  in convert_to_alpha8() local
   127  dst[x] = (src32[x] >> 30) * 0x55;  in convert_to_alpha8()
   130  src32 = SkTAddOffset<const uint32_t>(src32, srcRB);  in convert_to_alpha8()
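Both hit regions are the same loop with different alpha extraction: 8888 formats keep alpha in the top byte, while 1010102 formats keep only two alpha bits, which (src32[x] >> 30) * 0x55 expands to the full 0..255 range (0x55 = 255/3). A standalone sketch of that logic follows; the signature is hypothetical and a packed destination is assumed, whereas the real convert_to_alpha8 is templated over formats.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical sketch, not Skia's code.  srcRB is the source row
       stride in bytes and may exceed w * 4, hence the byte-offset
       advance at the end of each row.  */
    static void to_alpha8(uint8_t *dst, const void *src, size_t srcRB,
                          int w, int h, int is_1010102)
    {
      const uint32_t *src32 = (const uint32_t *) src;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x)
          dst[x] = is_1010102 ? (src32[x] >> 30) * 0x55   /* 2-bit alpha */
                              : src32[x] >> 24;           /* 8-bit alpha */
        dst += w;  /* assumes a packed destination */
        src32 = (const uint32_t *) ((const char *) src32 + srcRB);
      }
    }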
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | avg_intrin_avx2.c |
    98  __m256i src32[8];  in vpx_highbd_hadamard_8x8_avx2() local
   109  src32[0] = _mm256_cvtepi16_epi32(src16[0]);  in vpx_highbd_hadamard_8x8_avx2()
   110  src32[1] = _mm256_cvtepi16_epi32(src16[1]);  in vpx_highbd_hadamard_8x8_avx2()
   111  src32[2] = _mm256_cvtepi16_epi32(src16[2]);  in vpx_highbd_hadamard_8x8_avx2()
   112  src32[3] = _mm256_cvtepi16_epi32(src16[3]);  in vpx_highbd_hadamard_8x8_avx2()
   113  src32[4] = _mm256_cvtepi16_epi32(src16[4]);  in vpx_highbd_hadamard_8x8_avx2()
   114  src32[5] = _mm256_cvtepi16_epi32(src16[5]);  in vpx_highbd_hadamard_8x8_avx2()
   115  src32[6] = _mm256_cvtepi16_epi32(src16[6]);  in vpx_highbd_hadamard_8x8_avx2()
   116  src32[7] = _mm256_cvtepi16_epi32(src16[7]);  in vpx_highbd_hadamard_8x8_avx2()
   118  highbd_hadamard_col8_avx2(src32, 0);  in vpx_highbd_hadamard_8x8_avx2()
   [all …]
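The run of _mm256_cvtepi16_epi32 calls is the widening step of the high-bitdepth 8x8 Hadamard: eight rows of 16-bit residuals are sign-extended to 32 bits before the column transform, presumably because intermediate sums at 10/12-bit input depth can overflow 16 bits. A minimal sketch of just that step (the helper name is invented):

    #include <immintrin.h>

    /* Widen eight rows of eight int16 residuals to int32 lanes.
       _mm256_cvtepi16_epi32 sign-extends one 128-bit row (8 x i16)
       into a 256-bit register (8 x i32).  Requires AVX2.  */
    static void widen_rows_epi16_epi32(const __m128i src16[8],
                                       __m256i src32[8])
    {
      for (int i = 0; i < 8; ++i)
        src32[i] = _mm256_cvtepi16_epi32(src16[i]);
    }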
|
/external/libaom/libaom/aom_dsp/x86/ |
D | avg_intrin_avx2.c |
   339  __m256i src32[8];  in aom_highbd_hadamard_8x8_avx2() local
   350  src32[0] = _mm256_cvtepi16_epi32(src16[0]);  in aom_highbd_hadamard_8x8_avx2()
   351  src32[1] = _mm256_cvtepi16_epi32(src16[1]);  in aom_highbd_hadamard_8x8_avx2()
   352  src32[2] = _mm256_cvtepi16_epi32(src16[2]);  in aom_highbd_hadamard_8x8_avx2()
   353  src32[3] = _mm256_cvtepi16_epi32(src16[3]);  in aom_highbd_hadamard_8x8_avx2()
   354  src32[4] = _mm256_cvtepi16_epi32(src16[4]);  in aom_highbd_hadamard_8x8_avx2()
   355  src32[5] = _mm256_cvtepi16_epi32(src16[5]);  in aom_highbd_hadamard_8x8_avx2()
   356  src32[6] = _mm256_cvtepi16_epi32(src16[6]);  in aom_highbd_hadamard_8x8_avx2()
   357  src32[7] = _mm256_cvtepi16_epi32(src16[7]);  in aom_highbd_hadamard_8x8_avx2()
   359  highbd_hadamard_col8_avx2(src32, 0);  in aom_highbd_hadamard_8x8_avx2()
   [all …]
|
/external/skia/src/core/ |
D | SkConvertPixels.cpp |
   128  auto src32 = (const uint32_t*) src;  in convert_to_alpha8() local
   131  dst[x] = src32[x] >> 24;  in convert_to_alpha8()
   134  src32 = SkTAddOffset<const uint32_t>(src32, srcRB);  in convert_to_alpha8()
   141  auto src32 = (const uint32_t*) src;  in convert_to_alpha8() local
   144  dst[x] = (src32[x] >> 30) * 0x55;  in convert_to_alpha8()
   147  src32 = SkTAddOffset<const uint32_t>(src32, srcRB);  in convert_to_alpha8()
|
/external/icu/icu4c/source/test/cintltst/ |
D | custrtrn.c |
    68  static const UChar32 src32[]={  variable
   153  if(err != U_BUFFER_OVERFLOW_ERROR || u32DestLen != UPRV_LENGTHOF(src32)) {  in Test_strToUTF32()
   156  (long)u32DestLen, (long)UPRV_LENGTHOF(src32), u_errorName(err));  in Test_strToUTF32()
   161  u_strToUTF32(u32Target, UPRV_LENGTHOF(src32)+1, &u32DestLen, src16, UPRV_LENGTHOF(src16),&err);  in Test_strToUTF32()
   162  if(err != U_ZERO_ERROR || u32DestLen != UPRV_LENGTHOF(src32)) {  in Test_strToUTF32()
   165  (long)u32DestLen, (long)UPRV_LENGTHOF(src32), u_errorName(err));  in Test_strToUTF32()
   174  for(i=0; i< UPRV_LENGTHOF(src32); i++){  in Test_strToUTF32()
   175  if(u32Target[i] != src32[i]){  in Test_strToUTF32()
   176  …strToUTF32(with length) failed expected: %04X got: %04X at index: %i \n", src32[i], u32Target[i],i…  in Test_strToUTF32()
   186  if(err != U_BUFFER_OVERFLOW_ERROR || u32DestLen != UPRV_LENGTHOF(src32)-1) {  in Test_strToUTF32()
   [all …]
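These custrtrn.c matches exercise ICU's standard preflight contract for u_strToUTF32: a call with an undersized (or NULL, capacity 0) destination must fail with U_BUFFER_OVERFLOW_ERROR while still reporting the required length, and a retry with enough room must succeed and reproduce the src32 reference data. A compressed sketch of that calling pattern (variable names invented; real code would check the final status too):

    #include <unicode/ustring.h>
    #include <unicode/utypes.h>

    static void utf32_preflight_then_convert(const UChar *src16, int32_t len16,
                                             UChar32 *dst, int32_t dstCapacity)
    {
      UErrorCode err = U_ZERO_ERROR;
      int32_t needed = 0;

      /* Preflight: capacity 0 only measures the output length.  */
      u_strToUTF32(NULL, 0, &needed, src16, len16, &err);
      if (err == U_BUFFER_OVERFLOW_ERROR && needed + 1 <= dstCapacity) {
        err = U_ZERO_ERROR;  /* the overflow status must be reset before reuse */
        u_strToUTF32(dst, needed + 1, &needed, src16, len16, &err);
      }
    }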
|
/external/llvm-project/llvm/test/Analysis/CostModel/X86/ |
D | shuffle-broadcast.ll |
   153  define void @test_vXi16(<2 x i16> %src32, <4 x i16> %src64, <8 x i16> %src128, <16 x i16> %src256, …
   155  …d an estimated cost of 1 for instruction: %V32 = shufflevector <2 x i16> %src32, <2 x i16> undef, …
   163  …d an estimated cost of 1 for instruction: %V32 = shufflevector <2 x i16> %src32, <2 x i16> undef, …
   171  …d an estimated cost of 1 for instruction: %V32 = shufflevector <2 x i16> %src32, <2 x i16> undef, …
   179  …d an estimated cost of 1 for instruction: %V32 = shufflevector <2 x i16> %src32, <2 x i16> undef, …
   187  …d an estimated cost of 1 for instruction: %V32 = shufflevector <2 x i16> %src32, <2 x i16> undef, …
   195  …d an estimated cost of 1 for instruction: %V32 = shufflevector <2 x i16> %src32, <2 x i16> undef, …
   202  %V32 = shufflevector <2 x i16> %src32, <2 x i16> undef, <2 x i32> zeroinitializer
   210  define void @test_vXi8(<2 x i8> %src16, <4 x i8> %src32, <8 x i8> %src64, <16 x i8> %src128, <32 x …
   213  …nd an estimated cost of 2 for instruction: %V32 = shufflevector <4 x i8> %src32, <4 x i8> undef, <…
   [all …]
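In this cost-model test, %src32 names a 32-bit-wide vector (2 x i16 or 4 x i8), and every match is the same broadcast idiom: a shufflevector whose zeroinitializer mask replicates lane 0, priced per target. A hedged C sketch of the source-level construct that produces this IR; __builtin_shufflevector is a clang extension, so this is clang-specific:

    #include <stdint.h>

    /* A 2 x i16 vector, 32 bits total, matching the %src32 parameter. */
    typedef uint16_t v2i16 __attribute__((vector_size(4)));

    static v2i16 broadcast_v2i16(v2i16 src32)
    {
      /* Indices (0, 0) correspond to the zeroinitializer mask:
         splat lane 0 across the result.  */
      return __builtin_shufflevector(src32, src32, 0, 0);
    }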
|
/external/skia/tools/ |
D | ToolUtils.cpp |
   419  const uint32_t* src32 = (const uint32_t*)src.getPixels();  in copy_to_g8() local
   428  uint32_t s = src32[x];  in copy_to_g8()
   434  uint32_t s = src32[x];  in copy_to_g8()
   438  src32 = (const uint32_t*)((const char*)src32 + src.rowBytes());  in copy_to_g8()
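copy_to_g8 walks the 32-bit source one row at a time, reduces each pixel to a single gray byte, and advances by rowBytes(), which can exceed w * 4. A rough C sketch of the walk; the luma weights below are illustrative BT.709-style coefficients and an R-in-low-byte RGBA layout is assumed, whereas the real helper defers to Skia's own luminance and swizzle handling:

    #include <stddef.h>
    #include <stdint.h>

    static void copy_to_g8(uint8_t *dst, const uint32_t *src32,
                           size_t rowBytes, int w, int h)
    {
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
          uint32_t s = src32[x];
          uint8_t r = s & 0xFF, g = (s >> 8) & 0xFF, b = (s >> 16) & 0xFF;
          /* ~0.213*R + 0.715*G + 0.072*B, scaled so 54+183+19 = 256.  */
          dst[x] = (uint8_t) ((r * 54 + g * 183 + b * 19) >> 8);
        }
        dst += w;  /* assumes a packed destination */
        src32 = (const uint32_t *) ((const char *) src32 + rowBytes);
      }
    }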
|
/external/skqp/tools/ |
D | sk_tool_utils.cpp |
   346  const uint32_t* src32 = (const uint32_t*)src.getPixels();  in copy_to_g8() local
   355  uint32_t s = src32[x];  in copy_to_g8()
   361  uint32_t s = src32[x];  in copy_to_g8()
   365  src32 = (const uint32_t*)((const char*)src32 + src.rowBytes());  in copy_to_g8()
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-fp128.ll |
    94  %src32 = load i32, i32* @var32
    95  %val32 = sitofp i32 %src32 to fp128
   110  %src32 = load i32, i32* @var32
   111  %val32 = uitofp i32 %src32 to fp128
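Both matched regions load a 32-bit integer and convert it to fp128. AArch64 has no quad-precision hardware, so these conversions are expected to lower to compiler-rt libcalls (__floatsitf for the signed case, __floatunsitf for the unsigned one). A hedged C analogue, relying on long double being IEEE binary128 on AArch64:

    /* Hypothetical C equivalents of the IR under test.  */
    int var32;

    long double to_fp128_signed(void)   /* sitofp */
    {
      return (long double) var32;
    }

    long double to_fp128_unsigned(void) /* uitofp */
    {
      return (long double) (unsigned) var32;
    }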
|