// RUN: …ocks -fobjc-runtime=ios-11.0 -emit-llvm -o - %s | FileCheck -check-prefix=ARM64 -check-prefix=COMM…
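// The struct definition and the COMMON-prefixed checks are omitted from this
// ARM64-only listing. A plausible definition of the struct under test,
// inferred from the helper suffix _8_8_t0w4_w8 (8-byte-aligned destination
// and source, a trivial 4-byte word at offset 0, a __weak pointer at offset
// 8), would be:

typedef struct {
  int f0;       // trivial 4-byte field at offset 0 ("t0w4")
  __weak id f1; // weak field at offset 8 ("w8")
} Weak;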
// ARM64: define void @test_constructor_destructor_Weak()
// ARM64: %[[T:.*]] = alloca %[[STRUCT_WEAK]], align 8
// ARM64: %[[V0:.*]] = bitcast %[[STRUCT_WEAK]]* %[[T]] to i8**
// ARM64: call void @__default_constructor_8_w8(i8** %[[V0]])
// ARM64: %[[V1:.*]] = bitcast %[[STRUCT_WEAK]]* %[[T]] to i8**
// ARM64: call void @__destructor_8_w8(i8** %[[V1]])
// ARM64: ret void
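// A plausible source function for the checks above (the actual source lines
// are not part of this ARM64-only listing): a local of type Weak forces
// Clang to emit and call the synthesized default constructor and destructor.

void test_constructor_destructor_Weak(void) {
  Weak t;
}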
// ARM64: define linkonce_odr hidden void @__default_constructor_8_w8(i8** %[[DST:.*]])
// ARM64: %[[DST_ADDR:.*]] = alloca i8**, align 8
// ARM64: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V1:.*]] = bitcast i8** %[[V0]] to i8*
// ARM64: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 8
// ARM64: %[[V3:.*]] = bitcast i8* %[[V2]] to i8**
// ARM64: %[[V4:.*]] = bitcast i8** %[[V3]] to i8*
// ARM64: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V4]], i8 0, i64 8, i1 false)
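// Note: the synthesized default constructor skips the trivial int field and
// zero-initializes only the __weak field; the getelementptr advances 8 bytes
// to the weak slot and the memset clears its 8 bytes.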
// ARM64: define linkonce_odr hidden void @__destructor_8_w8(i8** %[[DST:.*]])
// ARM64: %[[DST_ADDR:.*]] = alloca i8**, align 8
// ARM64: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V1:.*]] = bitcast i8** %[[V0]] to i8*
// ARM64: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 8
// ARM64: %[[V3:.*]] = bitcast i8* %[[V2]] to i8**
// ARM64: call void @llvm.objc.destroyWeak(i8** %[[V3]])
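// Note: the synthesized destructor likewise touches only the __weak field,
// unregistering the weak reference at offset 8 via @llvm.objc.destroyWeak.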
// ARM64: define void @test_copy_constructor_Weak(%[[STRUCT_WEAK]]* %{{.*}})
// ARM64: call void @__copy_constructor_8_8_t0w4_w8(i8** %{{.*}}, i8** %{{.*}})
// ARM64: call void @__destructor_8_w8(i8** %{{.*}})
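// A plausible source for the copy-construction checks (the body is an
// assumption; only the calls above are visible): initializing a local from
// *s calls the synthesized copy constructor, and the local is destroyed at
// scope exit.

void test_copy_constructor_Weak(Weak *s) {
  Weak t = *s;
}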
// ARM64: define linkonce_odr hidden void @__copy_constructor_8_8_t0w4_w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
// ARM64: %[[DST_ADDR:.*]] = alloca i8**, align 8
// ARM64: %[[SRC_ADDR:.*]] = alloca i8**, align 8
// ARM64: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
// ARM64: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V2:.*]] = bitcast i8** %[[V0]] to i32*
// ARM64: %[[V3:.*]] = bitcast i8** %[[V1]] to i32*
// ARM64: %[[V4:.*]] = load i32, i32* %[[V3]], align 8
// ARM64: store i32 %[[V4]], i32* %[[V2]], align 8
// ARM64: %[[V5:.*]] = bitcast i8** %[[V0]] to i8*
// ARM64: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 8
// ARM64: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
// ARM64: %[[V8:.*]] = bitcast i8** %[[V1]] to i8*
// ARM64: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 8
// ARM64: %[[V10:.*]] = bitcast i8* %[[V9]] to i8**
// ARM64: call void @llvm.objc.copyWeak(i8** %[[V7]], i8** %[[V10]])
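// Note: the copy constructor copies the trivial int field with a plain i32
// load/store, then initializes the destination's __weak field from the
// source's at offset 8 via @llvm.objc.copyWeak.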
// ARM64: define void @test_copy_assignment_Weak(%[[STRUCT_WEAK]]* %{{.*}}, %[[STRUCT_WEAK]]* %{{.*}})
// ARM64: call void @__copy_assignment_8_8_t0w4_w8(i8** %{{.*}}, i8** %{{.*}})
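// A plausible source for the copy-assignment checks (an assumption; only the
// call to __copy_assignment_8_8_t0w4_w8 is visible above):

void test_copy_assignment_Weak(Weak *d, Weak *s) {
  *d = *s;
}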
// ARM64: define linkonce_odr hidden void @__copy_assignment_8_8_t0w4_w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
// ARM64: %[[DST_ADDR:.*]] = alloca i8**, align 8
// ARM64: %[[SRC_ADDR:.*]] = alloca i8**, align 8
// ARM64: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
// ARM64: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V2:.*]] = bitcast i8** %[[V0]] to i32*
// ARM64: %[[V3:.*]] = bitcast i8** %[[V1]] to i32*
// ARM64: %[[V4:.*]] = load i32, i32* %[[V3]], align 8
// ARM64: store i32 %[[V4]], i32* %[[V2]], align 8
// ARM64: %[[V5:.*]] = bitcast i8** %[[V0]] to i8*
// ARM64: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 8
// ARM64: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
// ARM64: %[[V8:.*]] = bitcast i8** %[[V1]] to i8*
// ARM64: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 8
// ARM64: %[[V10:.*]] = bitcast i8* %[[V9]] to i8**
// ARM64: %[[V11:.*]] = call i8* @llvm.objc.loadWeakRetained(i8** %[[V10]])
// ARM64: %[[V12:.*]] = call i8* @llvm.objc.storeWeak(i8** %[[V7]], i8* %[[V11]])
// ARM64: call void @llvm.objc.release(i8* %[[V11]])
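// Note: because the destination's __weak field is already registered,
// assignment is emitted as loadWeakRetained of the source, storeWeak into
// the destination, and a release of the temporarily retained object.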
// ARM64: define internal void @__Block_byref_object_copy_(i8* %0, i8* %1)
// ARM64: call void @__move_constructor_8_8_t0w4_w8(i8** %{{.*}}, i8** %{{.*}})
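// A plausible trigger for the byref copy helper above (the function name and
// body are assumptions; only __Block_byref_object_copy_ and its call into the
// move constructor are visible): capturing a __block Weak in a block makes
// the runtime move the variable to the heap through this helper.

void test_move_construction_Weak(void) {
  __block Weak t;
  ^{ (void)t; }();
}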
// ARM64: define linkonce_odr hidden void @__move_constructor_8_8_t0w4_w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
// ARM64: %[[DST_ADDR:.*]] = alloca i8**, align 8
// ARM64: %[[SRC_ADDR:.*]] = alloca i8**, align 8
// ARM64: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
// ARM64: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V2:.*]] = bitcast i8** %[[V0]] to i32*
// ARM64: %[[V3:.*]] = bitcast i8** %[[V1]] to i32*
// ARM64: %[[V4:.*]] = load i32, i32* %[[V3]], align 8
// ARM64: store i32 %[[V4]], i32* %[[V2]], align 8
// ARM64: %[[V5:.*]] = bitcast i8** %[[V0]] to i8*
// ARM64: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 8
// ARM64: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
// ARM64: %[[V8:.*]] = bitcast i8** %[[V1]] to i8*
// ARM64: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 8
// ARM64: %[[V10:.*]] = bitcast i8* %[[V9]] to i8**
// ARM64: call void @llvm.objc.moveWeak(i8** %[[V7]], i8** %[[V10]])
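// Note: the move constructor copies the int field, then transfers the __weak
// reference with @llvm.objc.moveWeak, which leaves the source slot cleared.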
// ARM64: define void @test_move_assignment_Weak(%[[STRUCT_WEAK]]* %{{.*}})
// ARM64: call void @__move_assignment_8_8_t0w4_w8(i8** %{{.*}}, i8** %{{.*}})
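// A plausible source for the move-assignment checks (hedged: getWeak and its
// signature are made up for illustration; only the call to
// __move_assignment_8_8_t0w4_w8 is visible above). Assigning from a by-value
// temporary lets Clang move out of it:

Weak getWeak(void);

void test_move_assignment_Weak(Weak *p) {
  *p = getWeak();
}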
// ARM64: define linkonce_odr hidden void @__move_assignment_8_8_t0w4_w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
// ARM64: %[[DST_ADDR:.*]] = alloca i8**, align 8
// ARM64: %[[SRC_ADDR:.*]] = alloca i8**, align 8
// ARM64: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
// ARM64: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
// ARM64: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
// ARM64: %[[V2:.*]] = bitcast i8** %[[V0]] to i32*
// ARM64: %[[V3:.*]] = bitcast i8** %[[V1]] to i32*
// ARM64: %[[V4:.*]] = load i32, i32* %[[V3]], align 8
// ARM64: store i32 %[[V4]], i32* %[[V2]], align 8
// ARM64: %[[V5:.*]] = bitcast i8** %[[V0]] to i8*
// ARM64: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 8
// ARM64: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
// ARM64: %[[V8:.*]] = bitcast i8** %[[V1]] to i8*
// ARM64: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 8
// ARM64: %[[V10:.*]] = bitcast i8* %[[V9]] to i8**
// ARM64: %[[V11:.*]] = call i8* @llvm.objc.loadWeakRetained(i8** %[[V10]])
// ARM64: %[[V12:.*]] = call i8* @llvm.objc.storeWeak(i8** %[[V7]], i8* %[[V11]])
// ARM64: call void @llvm.objc.destroyWeak(i8** %[[V10]])
// ARM64: call void @llvm.objc.release(i8* %[[V11]])
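// Note: move assignment combines the patterns above: loadWeakRetained and
// storeWeak update the destination, destroyWeak clears the moved-from source
// slot, and the final release drops the retain taken by loadWeakRetained.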