// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t

// If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
// ASM-NOT: warning
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
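
// For example, a call written as SVE_ACLE_FUNC(svget4,_s8,,)(tuple, 0)
// expands to the type-suffixed form svget4_s8(tuple, 0) by default, or to
// the overloaded form svget4(tuple, 0) when SVE_OVERLOADED_FORMS is defined,
// so both spellings of each intrinsic are exercised by the same test body.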

// NOTE: For these tests clang converts the struct parameter into
// several parameters, one for each member of the original struct.
svint8_t test_svget4_s8(svint8x4_t tuple)
{
  // CHECK-LABEL: test_svget4_s8
  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv64i8(<vscale x 64 x i8> %tuple, i32 0)
  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_s8,,)(tuple, 0);
}

svint16_t test_svget4_s16(svint16x4_t tuple)
{
  // CHECK-LABEL: test_svget4_s16
  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv32i16(<vscale x 32 x i16> %tuple, i32 2)
  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_s16,,)(tuple, 2);
}

svint32_t test_svget4_s32(svint32x4_t tuple)
{
  // CHECK-LABEL: test_svget4_s32
  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv16i32(<vscale x 16 x i32> %tuple, i32 2)
  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_s32,,)(tuple, 2);
}

svint64_t test_svget4_s64(svint64x4_t tuple)
{
  // CHECK-LABEL: test_svget4_s64
  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv8i64(<vscale x 8 x i64> %tuple, i32 3)
  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_s64,,)(tuple, 3);
}

svuint8_t test_svget4_u8(svuint8x4_t tuple)
{
  // CHECK-LABEL: test_svget4_u8
  // CHECK: %[[EXT:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.get.nxv16i8.nxv64i8(<vscale x 64 x i8> %tuple, i32 2)
  // CHECK-NEXT: ret <vscale x 16 x i8> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_u8,,)(tuple, 2);
}

svuint16_t test_svget4_u16(svuint16x4_t tuple)
{
  // CHECK-LABEL: test_svget4_u16
  // CHECK: %[[EXT:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tuple.get.nxv8i16.nxv32i16(<vscale x 32 x i16> %tuple, i32 3)
  // CHECK-NEXT: ret <vscale x 8 x i16> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_u16,,)(tuple, 3);
}

svuint32_t test_svget4_u32(svuint32x4_t tuple)
{
  // CHECK-LABEL: test_svget4_u32
  // CHECK: %[[EXT:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv4i32.nxv16i32(<vscale x 16 x i32> %tuple, i32 0)
  // CHECK-NEXT: ret <vscale x 4 x i32> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_u32,,)(tuple, 0);
}

svuint64_t test_svget4_u64(svuint64x4_t tuple)
{
  // CHECK-LABEL: test_svget4_u64
  // CHECK: %[[EXT:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tuple.get.nxv2i64.nxv8i64(<vscale x 8 x i64> %tuple, i32 3)
  // CHECK-NEXT: ret <vscale x 2 x i64> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_u64,,)(tuple, 3);
}

svfloat16_t test_svget4_f16(svfloat16x4_t tuple)
{
  // CHECK-LABEL: test_svget4_f16
  // CHECK: %[[EXT:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tuple.get.nxv8f16.nxv32f16(<vscale x 32 x half> %tuple, i32 2)
  // CHECK-NEXT: ret <vscale x 8 x half> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_f16,,)(tuple, 2);
}

svfloat32_t test_svget4_f32(svfloat32x4_t tuple)
{
  // CHECK-LABEL: test_svget4_f32
  // CHECK: %[[EXT:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tuple.get.nxv4f32.nxv16f32(<vscale x 16 x float> %tuple, i32 0)
  // CHECK-NEXT: ret <vscale x 4 x float> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_f32,,)(tuple, 0);
}

svfloat64_t test_svget4_f64(svfloat64x4_t tuple)
{
  // CHECK-LABEL: test_svget4_f64
  // CHECK: %[[EXT:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv8f64(<vscale x 8 x double> %tuple, i32 2)
  // CHECK-NEXT: ret <vscale x 2 x double> %[[EXT]]
  return SVE_ACLE_FUNC(svget4,_f64,,)(tuple, 2);
}