Searched refs:src64 (results 1–25 of 32), sorted by relevance

/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/
fptoui.ll
11 @src64 = common global [8 x double] zeroinitializer, align 64
24 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
25 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
26 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 2), al…
27 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 3), al…
28 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 4), al…
29 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 5), al…
30 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 6), al…
31 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 7), al…
51 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
[all …]
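
The fptoui.ll test above (and its fptosi/fround/fabs/sqrt siblings below) fills @src64 with eight adjacent doubles, converts each element with straight-line scalar code, and expects the SLP vectorizer to fuse the scalars into vector loads and casts. Roughly the C++ equivalent of that unrolled pattern (the dst64 array name is illustrative, not taken from this listing):

#include <cstdint>

// Hypothetical globals mirroring the test's @src64 source array and a
// destination array; alignment matches the IR's "align 64".
alignas(64) double src64[8];
alignas(64) uint64_t dst64[8];

// Eight independent loads and double -> u64 conversions, written unrolled
// so the SLP vectorizer (not the loop vectorizer) is what merges them into
// <2/4/8 x double> loads and vector fptoui casts.
void fptoui_8f64_8u64() {
    dst64[0] = static_cast<uint64_t>(src64[0]);
    dst64[1] = static_cast<uint64_t>(src64[1]);
    dst64[2] = static_cast<uint64_t>(src64[2]);
    dst64[3] = static_cast<uint64_t>(src64[3]);
    dst64[4] = static_cast<uint64_t>(src64[4]);
    dst64[5] = static_cast<uint64_t>(src64[5]);
    dst64[6] = static_cast<uint64_t>(src64[6]);
    dst64[7] = static_cast<uint64_t>(src64[7]);
}
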
fptosi.ll
11 @src64 = common global [8 x double] zeroinitializer, align 64
24 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
25 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
26 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 2), al…
27 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 3), al…
28 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 4), al…
29 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 5), al…
30 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 6), al…
31 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 7), al…
51 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
[all …]
fround.ll
11 @src64 = common global [8 x double] zeroinitializer, align 64
30 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
31 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
39 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <…
45 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <2 …
50 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
51 …%ld1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
61 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
62 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
63 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 2), al…
[all …]
fabs.ll
11 @src64 = common global [8 x double] zeroinitializer, align 64
25 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <…
30 …%a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
31 …%a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
41 ; SSE-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <2 …
42 …e>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 2) to …
50 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* bitcast ([8 x double]* @src64 to <4 …
55 …%a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
56 …%a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
57 …%a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
[all …]
sqrt.ll
9 @src64 = common global [8 x double] zeroinitializer, align 64
23 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <…
28 …%a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
29 …%a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
39 ; SSE-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <2 …
40 …e>* bitcast (double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 2) to …
48 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* bitcast ([8 x double]* @src64 to <4 …
53 …%a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
54 …%a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
55 …%a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 …
[all …]
sitofp.ll
11 @src64 = common global [8 x i64] zeroinitializer, align 64
25 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i…
26 ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i…
34 …LD0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), al…
35 …LD1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 1), al…
43 ; AVX512-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @src64 to <2 x i64>…
49 ; AVX256DQ-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @src64 to <2 x i6…
54 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
55 %ld1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 1), align 8
65 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i…
[all …]
bswap.ll
8 @src64 = common global [4 x i64] zeroinitializer, align 32
21 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
22 ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
30 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
35 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
36 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
46 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
47 ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
48 ; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
49 ; SSE-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
[all …]
uitofp.ll
11 @src64 = common global [8 x i64] zeroinitializer, align 64
25 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @src64 to <2 x i64>*…
30 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
31 %ld1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 1), align 8
41 ; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([8 x i64]* @src64 to <2 x i64>*),…
42 …, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 2) to …
50 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @src64 to <4 x i64>*),…
55 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
56 %ld1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 1), align 8
57 …%ld2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 2), align 16
[all …]
ctpop.ll
9 @src64 = common global [4 x i64] zeroinitializer, align 32
25 ; SSE2-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*)…
31 ; SSE42-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
32 ; SSE42-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
40 ; AVX-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
41 ; AVX-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
48 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
49 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
59 ; SSE2-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*)…
60 …, <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2) to …
[all …]
bitreverse.ll
10 @src64 = common global [4 x i64] zeroinitializer, align 32
26 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*…
31 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
32 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
42 ; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
43 …, <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2) to …
51 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*),…
57 ; XOP-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*),…
62 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
63 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
[all …]
/external/skia/src/core/
SkConvertPixels.cpp
154 auto src64 = (const uint64_t*) src; in convert_to_alpha8() local
157 dst[x] = (uint8_t) (255.0f * SkHalfToFloat(src64[x] >> 48)); in convert_to_alpha8()
160 src64 = SkTAddOffset<const uint64_t>(src64, srcRB); in convert_to_alpha8()
190 auto src64 = (const uint64_t*) src; in convert_to_alpha8() local
193 dst[x] = (src64[x] >> 48) >> 8; in convert_to_alpha8()
196 src64 = SkTAddOffset<const uint64_t>(src64, srcRB); in convert_to_alpha8()
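
In the F16 branch of convert_to_alpha8() above, each pixel is read as a packed uint64_t of four half floats with alpha in bits 48..63, converted to float, and scaled to 8 bits. A self-contained sketch of that row loop, with a simplified stand-in for Skia's SkHalfToFloat (zero and normal halfs only; subnormals, infinities and NaNs are ignored for brevity):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Simplified half -> float conversion standing in for SkHalfToFloat.
static float halfToFloatApprox(uint16_t h) {
    uint32_t sign = (h >> 15) & 1u;
    uint32_t exp  = (h >> 10) & 0x1fu;
    uint32_t mant = h & 0x3ffu;
    if (exp == 0) return sign ? -0.0f : 0.0f;  // zero (subnormals ignored)
    // Rebias the exponent from 15 to 127 and widen the mantissa 10 -> 23 bits.
    uint32_t bits = (sign << 31) | ((exp + 112u) << 23) | (mant << 13);
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

// One row of packed F16 RGBA -> alpha8, mirroring the loop above:
// alpha occupies the top 16 bits of each 64-bit pixel.
void f16RowToAlpha8(uint8_t* dst, const void* src, size_t width) {
    auto src64 = static_cast<const uint64_t*>(src);
    for (size_t x = 0; x < width; ++x)
        dst[x] = static_cast<uint8_t>(
            255.0f * halfToFloatApprox(static_cast<uint16_t>(src64[x] >> 48)));
}
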
/external/elfutils/libelf/
gnuhash_xlate.h
57 const Elf64_Xword *src64 = (const Elf64_Xword *) &src32[4]; in elf_cvt_gnuhash() local
62 dest64[cnt] = bswap_64 (src64[cnt]); in elf_cvt_gnuhash()
67 src32 = (const Elf32_Word *) &src64[bitmask_words]; in elf_cvt_gnuhash()
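
elf_cvt_gnuhash() converts a GNU hash section between byte orders: four 32-bit header words come first, then a Bloom-filter bitmask of 64-bit words (the src64/dest64 pointers above), then 32-bit buckets and chains again, each region swapped at its own width. A minimal sketch of the 64-bit leg, assuming bitmaskWords was already read from the section header (names illustrative):

#include <cstdint>

// Byte-swap just the 64-bit Bloom-filter words of a GNU hash section,
// mirroring the src64/dest64 loop in elf_cvt_gnuhash() above.
void bswapGnuHashBitmask(uint64_t* dest64, const uint64_t* src64,
                         uint32_t bitmaskWords) {
    for (uint32_t cnt = 0; cnt < bitmaskWords; ++cnt)
        dest64[cnt] = __builtin_bswap64(src64[cnt]);  // GCC/Clang builtin
}
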
/external/llvm/test/Transforms/SLPVectorizer/X86/
fround.ll
10 @src64 = common global [8 x double] zeroinitializer, align 64
29 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
30 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
38 ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <…
44 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <2 …
49 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
50 …%ld1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
60 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
61 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
62 …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 2), al…
[all …]
bswap.ll
8 @src64 = common global [4 x i64] zeroinitializer, align 32
21 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
22 ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
30 ; AVX-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
35 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
36 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
46 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
47 ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
48 ; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
49 ; SSE-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
[all …]
bitreverse.ll
10 @src64 = common global [4 x i64] zeroinitializer, align 32
26 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
27 ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
35 ; AVX-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
36 ; AVX-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
44 ; XOP-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
49 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
50 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
60 ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
61 ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
[all …]
ctpop.ll
8 @src64 = common global [4 x i64] zeroinitializer, align 32
24 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*…
29 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
30 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
40 ; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
41 …, <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2) to …
49 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*),…
54 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
55 %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
56 %ld2 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2), align 4
[all …]
/external/llvm-project/llvm/test/Analysis/CostModel/X86/
shuffle-broadcast.ll
81 define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %src256, <16 x float> …
83 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
90 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
97 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
104 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
110 %V64 = shufflevector <2 x float> %src64, <2 x float> undef, <2 x i32> zeroinitializer
117 define void @test_vXi32(<2 x i32> %src64, <4 x i32> %src128, <8 x i32> %src256, <16 x i32> %src512)…
119 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
126 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
133 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
[all …]
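
Each of these cost-model tests feeds vectors of every width (%src64 holds the 64-bit, two-lane case) through a fixed shuffle mask and checks the cost the analysis reports. As a scalar model of the broadcast case rated above:

#include <array>

// Scalar model of the <2 x float> broadcast the cost table rates at 1:
//   %V64 = shufflevector <2 x float> %src64, <2 x float> undef,
//                        <2 x i32> zeroinitializer
// An all-zero mask selects lane 0 of the first operand for every lane.
std::array<float, 2> broadcast2(const std::array<float, 2>& src64) {
    return {src64[0], src64[0]};
}
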
shuffle-reverse.ll
81 define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %src256, <16 x float> …
83 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
90 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
97 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
104 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
110 %V64 = shufflevector <2 x float> %src64, <2 x float> undef, <2 x i32> <i32 1, i32 0>
117 define void @test_vXi32(<2 x i32> %src64, <4 x i32> %src128, <8 x i32> %src256, <16 x i32> %src512)…
119 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
126 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
133 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
[all …]
shuffle-select.ll
92 define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %src256, <16 x float> …
94 …an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
102 …an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
110 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
118 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
126 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
134 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
141 %V64 = shufflevector <2 x float> %src64, <2 x float> %src64_1, <2 x i32> <i32 0, i32 3>
149 define void @test_vXi32(<2 x i32> %src64, <4 x i32> %src128, <8 x i32> %src256, <16 x i32> %src512,…
151 …d an estimated cost of 2 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> %src64_…
[all …]
shuffle-single-src.ll
100 define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %src256, <16 x float> …
102 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
109 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
116 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
123 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
130 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> undef…
136 %V64 = shufflevector <2 x float> %src64, <2 x float> undef, <2 x i32> <i32 1, i32 1>
143 define void @test_vXi32(<2 x i32> %src64, <4 x i32> %src128, <8 x i32> %src256, <16 x i32> %src512,…
145 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
153 …d an estimated cost of 1 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> undef, …
[all …]
shuffle-two-src.ll
92 define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %src256, <16 x float> …
94 …an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
102 …an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
110 …an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
118 …an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
126 …an estimated cost of 1 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src6…
133 %V64 = shufflevector <2 x float> %src64, <2 x float> %src64_1, <2 x i32> <i32 3, i32 0>
141 define void @test_vXi32(<2 x i32> %src64, <4 x i32> %src128, <8 x i32> %src256, <16 x i32> %src512,…
143 …d an estimated cost of 2 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> %src64_…
151 …d an estimated cost of 2 for instruction: %V64 = shufflevector <2 x i32> %src64, <2 x i32> %src64_…
[all …]
shuffle-insert_subvector.ll
135 define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %src256, <16 x float> …
137 …el: Unknown cost for instruction: %src64_128 = shufflevector <2 x float> %src64, <2 x float> undef…
138 …el: Unknown cost for instruction: %src64_256 = shufflevector <2 x float> %src64, <2 x float> undef…
139 …el: Unknown cost for instruction: %src64_512 = shufflevector <2 x float> %src64, <2 x float> undef…
168 …el: Unknown cost for instruction: %src64_128 = shufflevector <2 x float> %src64, <2 x float> undef…
169 …el: Unknown cost for instruction: %src64_256 = shufflevector <2 x float> %src64, <2 x float> undef…
170 …el: Unknown cost for instruction: %src64_512 = shufflevector <2 x float> %src64, <2 x float> undef…
199 …el: Unknown cost for instruction: %src64_128 = shufflevector <2 x float> %src64, <2 x float> undef…
200 …el: Unknown cost for instruction: %src64_256 = shufflevector <2 x float> %src64, <2 x float> undef…
201 …el: Unknown cost for instruction: %src64_512 = shufflevector <2 x float> %src64, <2 x float> undef…
[all …]
/external/skqp/src/core/
SkConvertPixels.cpp
136 auto src64 = (const uint64_t*) src; in convert_to_alpha8() local
139 dst[x] = (uint8_t) (255.0f * SkHalfToFloat(src64[x] >> 48)); in convert_to_alpha8()
142 src64 = SkTAddOffset<const uint64_t>(src64, srcRB); in convert_to_alpha8()
/external/llvm/test/CodeGen/AArch64/
arm64-fp128.ll
99 %src64 = load i64, i64* @var64
100 %val64 = sitofp i64 %src64 to fp128
115 %src64 = load i64, i64* @var64
116 %val64 = uitofp i64 %src64 to fp128
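
The arm64-fp128.ll snippets load an i64 and widen it to fp128 with sitofp/uitofp. In C++ on AArch64 this is an ordinary cast, since long double is IEEE binary128 there; the compiler typically lowers it to the __floatditf/__floatunditf runtime calls:

#include <cstdint>

// Plain casts expressing the i64 -> fp128 conversions tested above.
// Assumes an AArch64 target where long double is IEEE binary128.
long double fromSigned(int64_t src64)    { return static_cast<long double>(src64); }
long double fromUnsigned(uint64_t src64) { return static_cast<long double>(src64); }
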
/external/mesa3d/src/amd/vulkan/
radv_query.c
1251 uint64_t const *src64 = (uint64_t const *)src; in radv_GetQueryPoolResults() local
1255 value = p_atomic_read(src64); in radv_GetQueryPoolResults()
1276 uint64_t const *src64 = (uint64_t const *)src; in radv_GetQueryPoolResults() local
1289 start = p_atomic_read(src64 + 2 * i); in radv_GetQueryPoolResults()
1290 end = p_atomic_read(src64 + 2 * i + 1); in radv_GetQueryPoolResults()
1353 uint64_t const *src64 = (uint64_t const *)src; in radv_GetQueryPoolResults() local
1365 if (!(p_atomic_read(src64 + j) & 0x8000000000000000UL)) in radv_GetQueryPoolResults()
1372 num_primitives_written = src64[3] - src64[1]; in radv_GetQueryPoolResults()
1373 primitive_storage_needed = src64[2] - src64[0]; in radv_GetQueryPoolResults()
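
radv_GetQueryPoolResults() polls 64-bit result slots that the GPU fills in: bit 63 (0x8000000000000000) of a slot signals availability, values are read with relaxed atomics, and for transform-feedback queries the counters are differenced in start/end pairs. An illustrative sketch of the availability check, not the actual radv API, assuming the payload sits in the low 63 bits:

#include <atomic>
#include <cstdint>

// Poll one 64-bit query slot: a clear top bit means the GPU has not
// written the value yet; otherwise strip the availability bit and
// return the payload.
bool readQueryValue(const std::atomic<uint64_t>& slot, uint64_t& out) {
    uint64_t v = slot.load(std::memory_order_relaxed);
    if (!(v & 0x8000000000000000ull))
        return false;                     // not yet available
    out = v & 0x7fffffffffffffffull;      // strip the availability bit
    return true;
}
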
