Searched full:vaddr (Results 1 – 25 of 237) sorted by relevance

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
avx512vl-mov.ll
9 %vaddr = bitcast i8* %addr to <8 x i32>*
10 %res = load <8 x i32>, <8 x i32>* %vaddr, align 1
19 %vaddr = bitcast i8* %addr to <8 x i32>*
20 %res = load <8 x i32>, <8 x i32>* %vaddr, align 32
29 %vaddr = bitcast i8* %addr to <4 x i64>*
30 store <4 x i64>%data, <4 x i64>* %vaddr, align 32
39 %vaddr = bitcast i8* %addr to <8 x i32>*
40 store <8 x i32>%data, <8 x i32>* %vaddr, align 1
49 %vaddr = bitcast i8* %addr to <8 x i32>*
50 store <8 x i32>%data, <8 x i32>* %vaddr, align 32
[all …]
avx512bwvl-mov.ll
9 %vaddr = bitcast i8* %addr to <32 x i8>*
10 %res = load <32 x i8>, <32 x i8>* %vaddr, align 1
19 %vaddr = bitcast i8* %addr to <32 x i8>*
20 store <32 x i8>%data, <32 x i8>* %vaddr, align 1
31 %vaddr = bitcast i8* %addr to <32 x i8>*
32 %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
44 %vaddr = bitcast i8* %addr to <32 x i8>*
45 %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
55 %vaddr = bitcast i8* %addr to <16 x i16>*
56 %res = load <16 x i16>, <16 x i16>* %vaddr, align 1
[all …]
avx512-mov.ll
156 %vaddr = bitcast i8* %addr to <16 x i32>*
157 %res = load <16 x i32>, <16 x i32>* %vaddr, align 1
166 %vaddr = bitcast i8* %addr to <16 x i32>*
167 %res = load <16 x i32>, <16 x i32>* %vaddr, align 64
176 %vaddr = bitcast i8* %addr to <8 x i64>*
177 store <8 x i64>%data, <8 x i64>* %vaddr, align 64
186 %vaddr = bitcast i8* %addr to <16 x i32>*
187 store <16 x i32>%data, <16 x i32>* %vaddr, align 1
196 %vaddr = bitcast i8* %addr to <16 x i32>*
197 store <16 x i32>%data, <16 x i32>* %vaddr, align 64
[all …]
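
All of the avx512*-mov.ll hits above follow the same shape: a raw i8* argument is bitcast to a vector pointer named %vaddr, and the test then loads or stores through it with either align 1 (unaligned) or the vector's natural alignment. A minimal sketch of that pattern, assuming typed-pointer (pre-opaque-pointer) IR as used in these trees; the function names and the surrounding ret/argument plumbing are illustrative assumptions, only the %vaddr lines mirror the hits:

; Sketch of the avx512 move-test pattern (function names are hypothetical).
define <8 x i32> @load_8xi32_unaligned(i8* %addr) {
  %vaddr = bitcast i8* %addr to <8 x i32>*           ; reinterpret the raw pointer
  %res = load <8 x i32>, <8 x i32>* %vaddr, align 1  ; unaligned 256-bit load
  ret <8 x i32> %res
}

define void @store_4xi64_aligned(i8* %addr, <4 x i64> %data) {
  %vaddr = bitcast i8* %addr to <4 x i64>*
  store <4 x i64> %data, <4 x i64>* %vaddr, align 32 ; naturally aligned 256-bit store
  ret void
}
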
/external/llvm/test/Analysis/CostModel/AMDGPU/
add-sub.ll
6 define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
7 %vec = load i32, i32 addrspace(1)* %vaddr
15 define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) …
16 %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
24 define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) …
25 %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
33 define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) …
34 %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
42 define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
43 %vec = load i64, i64 addrspace(1)* %vaddr
[all …]
extractelement.ll
5 define void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
6 %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
14 define void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) {
15 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
23 define void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) {
24 %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
32 define void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) {
33 %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
41 define void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) {
42 %vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
[all …]
mul.ll
5 define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
6 %vec = load i32, i32 addrspace(1)* %vaddr
14 define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) …
15 %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
23 define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) …
24 %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
32 define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) …
33 %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
41 define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
42 %vec = load i64, i64 addrspace(1)* %vaddr
[all …]
fmul.ll
6 define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
7 %vec = load float, float addrspace(1)* %vaddr
15 define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x floa…
16 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
24 define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x floa…
25 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
34 define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
35 %vec = load double, double addrspace(1)* %vaddr
44 define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x do…
45 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fadd.ll
6 define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
7 %vec = load float, float addrspace(1)* %vaddr
15 define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x floa…
16 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
24 define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x floa…
25 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
34 define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
35 %vec = load double, double addrspace(1)* %vaddr
44 define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x do…
45 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fsub.ll
6 define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
7 %vec = load float, float addrspace(1)* %vaddr
15 define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x floa…
16 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
24 define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x floa…
25 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
34 define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
35 %vec = load double, double addrspace(1)* %vaddr
44 define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x do…
45 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fdiv.ll
8 define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
9 %vec = load float, float addrspace(1)* %vaddr
17 define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x floa…
18 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
26 define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x floa…
27 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
38 define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
39 %vec = load double, double addrspace(1)* %vaddr
50 define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x do…
51 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fabs.ll
5 define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
6 %vec = load float, float addrspace(1)* %vaddr
14 define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
15 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
23 define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
24 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
32 define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
33 %vec = load double, double addrspace(1)* %vaddr
41 define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
42 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
bit-ops.ll
5 define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
6 %vec = load i32, i32 addrspace(1)* %vaddr
14 define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
15 %vec = load i64, i64 addrspace(1)* %vaddr
23 define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
24 %vec = load i32, i32 addrspace(1)* %vaddr
32 define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
33 %vec = load i64, i64 addrspace(1)* %vaddr
42 define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
43 %vec = load i32, i32 addrspace(1)* %vaddr
[all …]
shifts.ll
6 define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
7 %vec = load i32, i32 addrspace(1)* %vaddr
16 define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
17 %vec = load i64, i64 addrspace(1)* %vaddr
25 define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
26 %vec = load i32, i32 addrspace(1)* %vaddr
35 define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
36 %vec = load i64, i64 addrspace(1)* %vaddr
44 define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
45 %vec = load i32, i32 addrspace(1)* %vaddr
[all …]
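
Every CostModel/AMDGPU hit above shares one skeleton: load a value through %vaddr from global memory (addrspace(1)), apply the operation being costed, and store the result to %out. Below is a sketch of the add_i32 case under that assumption; the signature and load line are taken from the hits, while the add/store body and the #0 attribute set are reconstructed guesses, not verbatim file contents:

; Sketch of the CostModel/AMDGPU test skeleton (body reconstructed, not verbatim).
define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
  %vec = load i32, i32 addrspace(1)* %vaddr  ; operand under test comes from global memory
  %add = add i32 %vec, %b                    ; operation whose cost is being modeled
  store i32 %add, i32 addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }               ; attribute set assumed
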
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/AMDGPU/
add-sub.ll
6 define amdgpu_kernel void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
7 %vec = load i32, i32 addrspace(1)* %vaddr
15 define amdgpu_kernel void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, …
16 %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
24 define amdgpu_kernel void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, …
25 %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
33 define amdgpu_kernel void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, …
34 %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
42 define amdgpu_kernel void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
43 %vec = load i64, i64 addrspace(1)* %vaddr
[all …]
extractelement.ll
7 … amdgpu_kernel void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
8 %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
16 …gpu_kernel void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) {
17 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
25 … amdgpu_kernel void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) {
26 %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
34 … amdgpu_kernel void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) {
35 %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
43 … amdgpu_kernel void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) {
44 %vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
[all …]
mul.ll
5 define amdgpu_kernel void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
6 %vec = load i32, i32 addrspace(1)* %vaddr
14 define amdgpu_kernel void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, …
15 %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
23 define amdgpu_kernel void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, …
24 %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
32 define amdgpu_kernel void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, …
33 %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
41 define amdgpu_kernel void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
42 %vec = load i64, i64 addrspace(1)* %vaddr
[all …]
fdiv.ll
11 define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b)…
12 %vec = load float, float addrspace(1)* %vaddr
21 …id @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #…
22 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
31 …id @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #…
32 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
43 define amdgpu_kernel void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double …
44 %vec = load double, double addrspace(1)* %vaddr
55 … @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) …
56 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fmul.ll
6 define amdgpu_kernel void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b)…
7 %vec = load float, float addrspace(1)* %vaddr
15 …id @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #…
16 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
24 …id @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #…
25 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
34 define amdgpu_kernel void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double …
35 %vec = load double, double addrspace(1)* %vaddr
44 … @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) …
45 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fadd.ll
6 define amdgpu_kernel void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b)…
7 %vec = load float, float addrspace(1)* %vaddr
15 …id @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #…
16 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
24 …id @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #…
25 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
34 define amdgpu_kernel void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double …
35 %vec = load double, double addrspace(1)* %vaddr
44 … @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) …
45 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fsub.ll
6 define amdgpu_kernel void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b)…
7 %vec = load float, float addrspace(1)* %vaddr
15 …id @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #…
16 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
24 …id @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #…
25 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
34 define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double …
35 %vec = load double, double addrspace(1)* %vaddr
44 … @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) …
45 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
fabs.ll
5 define amdgpu_kernel void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
6 %vec = load float, float addrspace(1)* %vaddr
14 …dgpu_kernel void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
15 %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
23 …dgpu_kernel void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
24 %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
32 define amdgpu_kernel void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
33 %vec = load double, double addrspace(1)* %vaddr
41 …pu_kernel void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
42 %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
[all …]
bit-ops.ll
5 define amdgpu_kernel void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
6 %vec = load i32, i32 addrspace(1)* %vaddr
14 define amdgpu_kernel void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
15 %vec = load i64, i64 addrspace(1)* %vaddr
23 define amdgpu_kernel void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
24 %vec = load i32, i32 addrspace(1)* %vaddr
32 define amdgpu_kernel void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
33 %vec = load i64, i64 addrspace(1)* %vaddr
42 define amdgpu_kernel void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
43 %vec = load i32, i32 addrspace(1)* %vaddr
[all …]
/external/llvm/test/CodeGen/X86/
avx512vl-mov.ll
9 %vaddr = bitcast i8* %addr to <8 x i32>*
10 %res = load <8 x i32>, <8 x i32>* %vaddr, align 1
19 %vaddr = bitcast i8* %addr to <8 x i32>*
20 %res = load <8 x i32>, <8 x i32>* %vaddr, align 32
29 %vaddr = bitcast i8* %addr to <4 x i64>*
30 store <4 x i64>%data, <4 x i64>* %vaddr, align 32
39 %vaddr = bitcast i8* %addr to <8 x i32>*
40 store <8 x i32>%data, <8 x i32>* %vaddr, align 1
49 %vaddr = bitcast i8* %addr to <8 x i32>*
50 store <8 x i32>%data, <8 x i32>* %vaddr, align 32
[all …]
avx512bwvl-mov.ll
9 %vaddr = bitcast i8* %addr to <32 x i8>*
10 %res = load <32 x i8>, <32 x i8>* %vaddr, align 1
19 %vaddr = bitcast i8* %addr to <32 x i8>*
20 store <32 x i8>%data, <32 x i8>* %vaddr, align 1
32 %vaddr = bitcast i8* %addr to <32 x i8>*
33 %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
46 %vaddr = bitcast i8* %addr to <32 x i8>*
47 %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
57 %vaddr = bitcast i8* %addr to <16 x i16>*
58 %res = load <16 x i16>, <16 x i16>* %vaddr, align 1
[all …]
avx512-mov.ll
156 %vaddr = bitcast i8* %addr to <16 x i32>*
157 %res = load <16 x i32>, <16 x i32>* %vaddr, align 1
166 %vaddr = bitcast i8* %addr to <16 x i32>*
167 %res = load <16 x i32>, <16 x i32>* %vaddr, align 64
176 %vaddr = bitcast i8* %addr to <8 x i64>*
177 store <8 x i64>%data, <8 x i64>* %vaddr, align 64
186 %vaddr = bitcast i8* %addr to <16 x i32>*
187 store <16 x i32>%data, <16 x i32>* %vaddr, align 1
196 %vaddr = bitcast i8* %addr to <16 x i32>*
197 store <16 x i32>%data, <16 x i32>* %vaddr, align 64
[all …]
