// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -msve-vector-bits=512 -fallow-half-arguments-and-returns -S -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

fixed_bool_t global_pred;
fixed_int32_t global_vec;

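//
// Test implicit conversion between the fixed-length types and their sizeless
// counterparts: the fixed-length globals are passed to the svand_z and svadd_m
// intrinsics, and the sizeless intrinsic result is returned as fixed_int32_t.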
// CHECK-LABEL: @foo(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: store <vscale x 16 x i1> [[PRED:%.*]], <vscale x 16 x i1>* [[PRED_ADDR]], align 2
// CHECK-NEXT: store <vscale x 4 x i32> [[VEC:%.*]], <vscale x 4 x i32>* [[VEC_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_pred to <vscale x 16 x i1>*), align 2
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_pred to <vscale x 16 x i1>*), align 2
// CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP4]])
// CHECK-NEXT: store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[PG]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
// CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16
// CHECK-NEXT: [[TMP8:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* bitcast (<16 x i32>* @global_vec to <vscale x 4 x i32>*), align 16
// CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[VEC_ADDR]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP6]])
// CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP10]], <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[TMP9]])
// CHECK-NEXT: store <vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
// CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16
// CHECK-NEXT: store <16 x i32> [[TMP12]], <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT: [[TMP13:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to i8*
// CHECK-NEXT: [[TMP14:%.*]] = bitcast <16 x i32>* [[RETVAL]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP13]], i8* align 16 [[TMP14]], i64 64, i1 false)
// CHECK-NEXT: [[TMP15:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP15]]
//
fixed_int32_t foo(svbool_t pred, svint32_t vec) {
  svbool_t pg = svand_z(pred, global_pred, global_pred);
  return svadd_m(pg, global_vec, vec);
}

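// Test loading a fixed-length vector through a pointer to a fixed-length global.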
// CHECK-LABEL: @test_ptr_to_global(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[GLOBAL_VEC_PTR:%.*]] = alloca <16 x i32>*, align 8
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: store <16 x i32>* @global_vec, <16 x i32>** [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>*, <16 x i32>** [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16
// CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to i8*
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32>* [[RETVAL]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP2]], i8* align 16 [[TMP3]], i64 64, i1 false)
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
fixed_int32_t test_ptr_to_global() {
  fixed_int32_t *global_vec_ptr;
  global_vec_ptr = &global_vec;
  return *global_vec_ptr;
}

//
// Test casting a pointer from a fixed-length array to a scalable vector.
// CHECK-LABEL: @array_arg(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT: [[ARR_ADDR:%.*]] = alloca <16 x i32>*, align 8
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT: store <16 x i32>* [[ARR:%.*]], <16 x i32>** [[ARR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>*, <16 x i32>** [[ARR_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <16 x i32>, <16 x i32>* [[TMP0]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[ARRAYIDX]], align 16
// CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to i8*
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32>* [[RETVAL]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP2]], i8* align 16 [[TMP3]], i64 64, i1 false)
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
fixed_int32_t array_arg(fixed_int32_t arr[]) {
  return arr[0];
}

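// Test taking the address of an element of a fixed-length predicate array and
// returning the fixed-length predicate loaded through that pointer.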
// CHECK-LABEL: @address_of_array_idx(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca <8 x i8>, align 2
// CHECK-NEXT: [[ARR:%.*]] = alloca [3 x <8 x i8>], align 2
// CHECK-NEXT: [[PARR:%.*]] = alloca <8 x i8>*, align 8
// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], [3 x <8 x i8>]* [[ARR]], i64 0, i64 0
// CHECK-NEXT: store <8 x i8>* [[ARRAYIDX]], <8 x i8>** [[PARR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>*, <8 x i8>** [[PARR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 2
// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i8*
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8>* [[RETVAL]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP2]], i8* align 2 [[TMP3]], i64 8, i1 false)
// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP4]]
//
fixed_bool_t address_of_array_idx() {
  fixed_bool_t arr[3];
  fixed_bool_t *parr;
  parr = &arr[0];
  return *parr;
}