// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -fno-aapcs-bitfield-width | FileCheck %s -check-prefix=LE
// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -fno-aapcs-bitfield-width | FileCheck %s -check-prefix=BE
// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -faapcs-bitfield-load -fno-aapcs-bitfield-width | FileCheck %s -check-prefixes=LENUMLOADS
// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -faapcs-bitfield-load -fno-aapcs-bitfield-width | FileCheck %s -check-prefixes=BENUMLOADS
// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=LEWIDTH
// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=BEWIDTH
// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -faapcs-bitfield-load | FileCheck %s -check-prefixes=LEWIDTHNUM
// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -faapcs-bitfield-load | FileCheck %s -check-prefixes=BEWIDTHNUM

// A single 7-bit bitfield in a 16-bit (short) storage unit.
struct st0 {
  short c : 7;
};
14 
15 // LE-LABEL: @st0_check_load(
16 // LE-NEXT:  entry:
17 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
18 // LE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
19 // LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
20 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
21 // LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
22 // LE-NEXT:    ret i32 [[CONV]]
23 //
24 // BE-LABEL: @st0_check_load(
25 // BE-NEXT:  entry:
26 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
27 // BE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
28 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
29 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
30 // BE-NEXT:    ret i32 [[CONV]]
31 //
32 // LENUMLOADS-LABEL: @st0_check_load(
33 // LENUMLOADS-NEXT:  entry:
34 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
35 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
36 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
37 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
38 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
39 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
40 //
41 // BENUMLOADS-LABEL: @st0_check_load(
42 // BENUMLOADS-NEXT:  entry:
43 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
44 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
45 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
46 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
47 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
48 //
49 // LEWIDTH-LABEL: @st0_check_load(
50 // LEWIDTH-NEXT:  entry:
51 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
52 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
53 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
54 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
55 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
56 // LEWIDTH-NEXT:    ret i32 [[CONV]]
57 //
58 // BEWIDTH-LABEL: @st0_check_load(
59 // BEWIDTH-NEXT:  entry:
60 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
61 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
62 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
63 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
64 // BEWIDTH-NEXT:    ret i32 [[CONV]]
65 //
66 // LEWIDTHNUM-LABEL: @st0_check_load(
67 // LEWIDTHNUM-NEXT:  entry:
68 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
69 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
70 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
71 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
72 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
73 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
74 //
75 // BEWIDTHNUM-LABEL: @st0_check_load(
76 // BEWIDTHNUM-NEXT:  entry:
77 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
78 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
79 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
80 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
81 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
82 //
st0_check_load(struct st0 * m)83 int st0_check_load(struct st0 *m) {
84   return m->c;
85 }
86 
87 // LE-LABEL: @st0_check_store(
88 // LE-NEXT:  entry:
89 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
90 // LE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
91 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
92 // LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
93 // LE-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
94 // LE-NEXT:    ret void
95 //
96 // BE-LABEL: @st0_check_store(
97 // BE-NEXT:  entry:
98 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
99 // BE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
100 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
101 // BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
102 // BE-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
103 // BE-NEXT:    ret void
104 //
105 // LENUMLOADS-LABEL: @st0_check_store(
106 // LENUMLOADS-NEXT:  entry:
107 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
108 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
109 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
110 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
111 // LENUMLOADS-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
112 // LENUMLOADS-NEXT:    ret void
113 //
114 // BENUMLOADS-LABEL: @st0_check_store(
115 // BENUMLOADS-NEXT:  entry:
116 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
117 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
118 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
119 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
120 // BENUMLOADS-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
121 // BENUMLOADS-NEXT:    ret void
122 //
123 // LEWIDTH-LABEL: @st0_check_store(
124 // LEWIDTH-NEXT:  entry:
125 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
126 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
127 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
128 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
129 // LEWIDTH-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
130 // LEWIDTH-NEXT:    ret void
131 //
132 // BEWIDTH-LABEL: @st0_check_store(
133 // BEWIDTH-NEXT:  entry:
134 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
135 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
136 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
137 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
138 // BEWIDTH-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
139 // BEWIDTH-NEXT:    ret void
140 //
141 // LEWIDTHNUM-LABEL: @st0_check_store(
142 // LEWIDTHNUM-NEXT:  entry:
143 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
144 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
145 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
146 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
147 // LEWIDTHNUM-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
148 // LEWIDTHNUM-NEXT:    ret void
149 //
150 // BEWIDTHNUM-LABEL: @st0_check_store(
151 // BEWIDTHNUM-NEXT:  entry:
152 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
153 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
154 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
155 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
156 // BEWIDTHNUM-NEXT:    store i8 [[BF_SET]], i8* [[TMP0]], align 2
157 // BEWIDTHNUM-NEXT:    ret void
158 //
st0_check_store(struct st0 * m)159 void st0_check_store(struct st0 *m) {
160   m->c = 1;
161 }
162 
// Two bitfields sharing one 32-bit storage unit: 10 bits then 6 bits.
struct st1 {
  int a : 10;
  short c : 6;
};
167 
168 // LE-LABEL: @st1_check_load(
169 // LE-NEXT:  entry:
170 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
171 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
172 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
173 // LE-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
174 // LE-NEXT:    ret i32 [[CONV]]
175 //
176 // BE-LABEL: @st1_check_load(
177 // BE-NEXT:  entry:
178 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
179 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
180 // BE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
181 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 10
182 // BE-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
183 // BE-NEXT:    ret i32 [[CONV]]
184 //
185 // LENUMLOADS-LABEL: @st1_check_load(
186 // LENUMLOADS-NEXT:  entry:
187 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
188 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
189 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
190 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
191 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
192 //
193 // BENUMLOADS-LABEL: @st1_check_load(
194 // BENUMLOADS-NEXT:  entry:
195 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
196 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
197 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
198 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 10
199 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
200 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
201 //
202 // LEWIDTH-LABEL: @st1_check_load(
203 // LEWIDTH-NEXT:  entry:
204 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
205 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
206 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
207 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
208 // LEWIDTH-NEXT:    ret i32 [[CONV]]
209 //
210 // BEWIDTH-LABEL: @st1_check_load(
211 // BEWIDTH-NEXT:  entry:
212 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
213 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
214 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
215 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 10
216 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
217 // BEWIDTH-NEXT:    ret i32 [[CONV]]
218 //
219 // LEWIDTHNUM-LABEL: @st1_check_load(
220 // LEWIDTHNUM-NEXT:  entry:
221 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
222 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
223 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
224 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
225 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
226 //
227 // BEWIDTHNUM-LABEL: @st1_check_load(
228 // BEWIDTHNUM-NEXT:  entry:
229 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
230 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
231 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
232 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 10
233 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
234 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
235 //
st1_check_load(struct st1 * m)236 int st1_check_load(struct st1 *m) {
237   return m->c;
238 }
239 
240 // LE-LABEL: @st1_check_store(
241 // LE-NEXT:  entry:
242 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
243 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
244 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
245 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
246 // LE-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
247 // LE-NEXT:    ret void
248 //
249 // BE-LABEL: @st1_check_store(
250 // BE-NEXT:  entry:
251 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
252 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
253 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
254 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
255 // BE-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
256 // BE-NEXT:    ret void
257 //
258 // LENUMLOADS-LABEL: @st1_check_store(
259 // LENUMLOADS-NEXT:  entry:
260 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
261 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
262 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
263 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
264 // LENUMLOADS-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
265 // LENUMLOADS-NEXT:    ret void
266 //
267 // BENUMLOADS-LABEL: @st1_check_store(
268 // BENUMLOADS-NEXT:  entry:
269 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
270 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
271 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
272 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
273 // BENUMLOADS-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
274 // BENUMLOADS-NEXT:    ret void
275 //
276 // LEWIDTH-LABEL: @st1_check_store(
277 // LEWIDTH-NEXT:  entry:
278 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
279 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
280 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
281 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
282 // LEWIDTH-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
283 // LEWIDTH-NEXT:    ret void
284 //
285 // BEWIDTH-LABEL: @st1_check_store(
286 // BEWIDTH-NEXT:  entry:
287 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
288 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
289 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
290 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
291 // BEWIDTH-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
292 // BEWIDTH-NEXT:    ret void
293 //
294 // LEWIDTHNUM-LABEL: @st1_check_store(
295 // LEWIDTHNUM-NEXT:  entry:
296 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
297 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
298 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
299 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
300 // LEWIDTHNUM-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
301 // LEWIDTHNUM-NEXT:    ret void
302 //
303 // BEWIDTHNUM-LABEL: @st1_check_store(
304 // BEWIDTHNUM-NEXT:  entry:
305 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
306 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
307 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
308 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
309 // BEWIDTHNUM-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
310 // BEWIDTHNUM-NEXT:    ret void
311 //
st1_check_store(struct st1 * m)312 void st1_check_store(struct st1 *m) {
313   m->c = 1;
314 }
315 
// 10-bit and 7-bit fields that do not fit one 16-bit unit together,
// so st2::c lands in its own storage unit.
struct st2 {
  int a : 10;
  short c : 7;
};
320 
321 // LE-LABEL: @st2_check_load(
322 // LE-NEXT:  entry:
323 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
324 // LE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
325 // LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
326 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
327 // LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
328 // LE-NEXT:    ret i32 [[CONV]]
329 //
330 // BE-LABEL: @st2_check_load(
331 // BE-NEXT:  entry:
332 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
333 // BE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
334 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
335 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
336 // BE-NEXT:    ret i32 [[CONV]]
337 //
338 // LENUMLOADS-LABEL: @st2_check_load(
339 // LENUMLOADS-NEXT:  entry:
340 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
341 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
342 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
343 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
344 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
345 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
346 //
347 // BENUMLOADS-LABEL: @st2_check_load(
348 // BENUMLOADS-NEXT:  entry:
349 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
350 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
351 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
352 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
353 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
354 //
355 // LEWIDTH-LABEL: @st2_check_load(
356 // LEWIDTH-NEXT:  entry:
357 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
358 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
359 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
360 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
361 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
362 // LEWIDTH-NEXT:    ret i32 [[CONV]]
363 //
364 // BEWIDTH-LABEL: @st2_check_load(
365 // BEWIDTH-NEXT:  entry:
366 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
367 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
368 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
369 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
370 // BEWIDTH-NEXT:    ret i32 [[CONV]]
371 //
372 // LEWIDTHNUM-LABEL: @st2_check_load(
373 // LEWIDTHNUM-NEXT:  entry:
374 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
375 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
376 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
377 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
378 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
379 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
380 //
381 // BEWIDTHNUM-LABEL: @st2_check_load(
382 // BEWIDTHNUM-NEXT:  entry:
383 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
384 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
385 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
386 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
387 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
388 //
st2_check_load(struct st2 * m)389 int st2_check_load(struct st2 *m) {
390   return m->c;
391 }
392 
393 // LE-LABEL: @st2_check_store(
394 // LE-NEXT:  entry:
395 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
396 // LE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
397 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
398 // LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
399 // LE-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
400 // LE-NEXT:    ret void
401 //
402 // BE-LABEL: @st2_check_store(
403 // BE-NEXT:  entry:
404 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
405 // BE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
406 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
407 // BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
408 // BE-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
409 // BE-NEXT:    ret void
410 //
411 // LENUMLOADS-LABEL: @st2_check_store(
412 // LENUMLOADS-NEXT:  entry:
413 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
414 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
415 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
416 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
417 // LENUMLOADS-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
418 // LENUMLOADS-NEXT:    ret void
419 //
420 // BENUMLOADS-LABEL: @st2_check_store(
421 // BENUMLOADS-NEXT:  entry:
422 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
423 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
424 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
425 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
426 // BENUMLOADS-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
427 // BENUMLOADS-NEXT:    ret void
428 //
429 // LEWIDTH-LABEL: @st2_check_store(
430 // LEWIDTH-NEXT:  entry:
431 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
432 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
433 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
434 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
435 // LEWIDTH-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
436 // LEWIDTH-NEXT:    ret void
437 //
438 // BEWIDTH-LABEL: @st2_check_store(
439 // BEWIDTH-NEXT:  entry:
440 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
441 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
442 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
443 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
444 // BEWIDTH-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
445 // BEWIDTH-NEXT:    ret void
446 //
447 // LEWIDTHNUM-LABEL: @st2_check_store(
448 // LEWIDTHNUM-NEXT:  entry:
449 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
450 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
451 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
452 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
453 // LEWIDTHNUM-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
454 // LEWIDTHNUM-NEXT:    ret void
455 //
456 // BEWIDTHNUM-LABEL: @st2_check_store(
457 // BEWIDTHNUM-NEXT:  entry:
458 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
459 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
460 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
461 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
462 // BEWIDTHNUM-NEXT:    store i8 [[BF_SET]], i8* [[C]], align 2
463 // BEWIDTHNUM-NEXT:    ret void
464 //
st2_check_store(struct st2 * m)465 void st2_check_store(struct st2 *m) {
466   m->c = 1;
467 }
// Volatile access is allowed to use 16 bits
// A single volatile 7-bit bitfield in a 16-bit (short) storage unit.
struct st3 {
  volatile short c : 7;
};
472 
473 // LE-LABEL: @st3_check_load(
474 // LE-NEXT:  entry:
475 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
476 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
477 // LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
478 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
479 // LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
480 // LE-NEXT:    ret i32 [[CONV]]
481 //
482 // BE-LABEL: @st3_check_load(
483 // BE-NEXT:  entry:
484 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
485 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
486 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
487 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
488 // BE-NEXT:    ret i32 [[CONV]]
489 //
490 // LENUMLOADS-LABEL: @st3_check_load(
491 // LENUMLOADS-NEXT:  entry:
492 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
493 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
494 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
495 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
496 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
497 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
498 //
499 // BENUMLOADS-LABEL: @st3_check_load(
500 // BENUMLOADS-NEXT:  entry:
501 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
502 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
503 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
504 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
505 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
506 //
507 // LEWIDTH-LABEL: @st3_check_load(
508 // LEWIDTH-NEXT:  entry:
509 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
510 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
511 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
512 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 9
513 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
514 // LEWIDTH-NEXT:    ret i32 [[CONV]]
515 //
516 // BEWIDTH-LABEL: @st3_check_load(
517 // BEWIDTH-NEXT:  entry:
518 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
519 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
520 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 9
521 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
522 // BEWIDTH-NEXT:    ret i32 [[CONV]]
523 //
524 // LEWIDTHNUM-LABEL: @st3_check_load(
525 // LEWIDTHNUM-NEXT:  entry:
526 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
527 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
528 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
529 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 9
530 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
531 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
532 //
533 // BEWIDTHNUM-LABEL: @st3_check_load(
534 // BEWIDTHNUM-NEXT:  entry:
535 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
536 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
537 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 9
538 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
539 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
540 //
st3_check_load(struct st3 * m)541 int st3_check_load(struct st3 *m) {
542   return m->c;
543 }
544 
545 // LE-LABEL: @st3_check_store(
546 // LE-NEXT:  entry:
547 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
548 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
549 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
550 // LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
551 // LE-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP0]], align 2
552 // LE-NEXT:    ret void
553 //
554 // BE-LABEL: @st3_check_store(
555 // BE-NEXT:  entry:
556 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
557 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
558 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
559 // BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
560 // BE-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP0]], align 2
561 // BE-NEXT:    ret void
562 //
563 // LENUMLOADS-LABEL: @st3_check_store(
564 // LENUMLOADS-NEXT:  entry:
565 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
566 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
567 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
568 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
569 // LENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP0]], align 2
570 // LENUMLOADS-NEXT:    ret void
571 //
572 // BENUMLOADS-LABEL: @st3_check_store(
573 // BENUMLOADS-NEXT:  entry:
574 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
575 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
576 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
577 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
578 // BENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP0]], align 2
579 // BENUMLOADS-NEXT:    ret void
580 //
581 // LEWIDTH-LABEL: @st3_check_store(
582 // LEWIDTH-NEXT:  entry:
583 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
584 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
585 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -128
586 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
587 // LEWIDTH-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 2
588 // LEWIDTH-NEXT:    ret void
589 //
590 // BEWIDTH-LABEL: @st3_check_store(
591 // BEWIDTH-NEXT:  entry:
592 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
593 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
594 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 511
595 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
596 // BEWIDTH-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 2
597 // BEWIDTH-NEXT:    ret void
598 //
599 // LEWIDTHNUM-LABEL: @st3_check_store(
600 // LEWIDTHNUM-NEXT:  entry:
601 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
602 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
603 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -128
604 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
605 // LEWIDTHNUM-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 2
606 // LEWIDTHNUM-NEXT:    ret void
607 //
608 // BEWIDTHNUM-LABEL: @st3_check_store(
609 // BEWIDTHNUM-NEXT:  entry:
610 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
611 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
612 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 511
613 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
614 // BEWIDTHNUM-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 2
615 // BEWIDTHNUM-NEXT:    ret void
616 //
// Store 1 into st3.c.  The generated IR above shows a volatile
// read-modify-write; without -faapcs-bitfield-width (LE/BE/LENUMLOADS/
// BENUMLOADS) the access uses an i8 container, while honoring the
// declared width (LEWIDTH/BEWIDTH and the *NUM variants) widens it to i16.
void st3_check_store(struct st3 *m) {
  m->c = 1;
}
// Volatile access to st4.c should use a char ld/st
struct st4 {
  int b : 9;           // non-volatile 9-bit field (accessed via plain i16 ops below)
  volatile char c : 5; // volatile 5-bit field; with width honoring, accessed as a single i8
};
625 
626 // LE-LABEL: @st4_check_load(
627 // LE-NEXT:  entry:
628 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
629 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
630 // LE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 2
631 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
632 // LE-NEXT:    [[BF_CAST:%.*]] = zext i16 [[BF_ASHR]] to i32
633 // LE-NEXT:    [[SEXT:%.*]] = shl i32 [[BF_CAST]], 24
634 // LE-NEXT:    [[CONV:%.*]] = ashr exact i32 [[SEXT]], 24
635 // LE-NEXT:    ret i32 [[CONV]]
636 //
637 // BE-LABEL: @st4_check_load(
638 // BE-NEXT:  entry:
639 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
640 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
641 // BE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
642 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
643 // BE-NEXT:    [[BF_CAST:%.*]] = zext i16 [[BF_ASHR]] to i32
644 // BE-NEXT:    [[SEXT:%.*]] = shl i32 [[BF_CAST]], 24
645 // BE-NEXT:    [[CONV:%.*]] = ashr exact i32 [[SEXT]], 24
646 // BE-NEXT:    ret i32 [[CONV]]
647 //
648 // LENUMLOADS-LABEL: @st4_check_load(
649 // LENUMLOADS-NEXT:  entry:
650 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
651 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
652 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 2
653 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
654 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = zext i16 [[BF_ASHR]] to i32
655 // LENUMLOADS-NEXT:    [[SEXT:%.*]] = shl i32 [[BF_CAST]], 24
656 // LENUMLOADS-NEXT:    [[CONV:%.*]] = ashr exact i32 [[SEXT]], 24
657 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
658 //
659 // BENUMLOADS-LABEL: @st4_check_load(
660 // BENUMLOADS-NEXT:  entry:
661 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
662 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
663 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
664 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
665 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = zext i16 [[BF_ASHR]] to i32
666 // BENUMLOADS-NEXT:    [[SEXT:%.*]] = shl i32 [[BF_CAST]], 24
667 // BENUMLOADS-NEXT:    [[CONV:%.*]] = ashr exact i32 [[SEXT]], 24
668 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
669 //
670 // LEWIDTH-LABEL: @st4_check_load(
671 // LEWIDTH-NEXT:  entry:
672 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
673 // LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
674 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
675 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
676 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
677 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
678 // LEWIDTH-NEXT:    ret i32 [[CONV]]
679 //
680 // BEWIDTH-LABEL: @st4_check_load(
681 // BEWIDTH-NEXT:  entry:
682 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
683 // BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
684 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
685 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
686 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
687 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
688 // BEWIDTH-NEXT:    ret i32 [[CONV]]
689 //
690 // LEWIDTHNUM-LABEL: @st4_check_load(
691 // LEWIDTHNUM-NEXT:  entry:
692 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
693 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
694 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
695 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
696 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
697 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
698 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
699 //
700 // BEWIDTHNUM-LABEL: @st4_check_load(
701 // BEWIDTHNUM-NEXT:  entry:
702 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
703 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
704 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
705 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
706 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
707 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
708 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
709 //
// Read the volatile 5-bit field st4.c.  Per the checks above: without
// -faapcs-bitfield-width the load uses a volatile i16 container
// (LE/BE/*NUMLOADS); honoring the declared char width narrows it to a
// volatile i8 load at byte offset 1 (LEWIDTH/BEWIDTH and *WIDTHNUM).
int st4_check_load(struct st4 *m) {
  return m->c;
}
713 
714 // LE-LABEL: @st4_check_store(
715 // LE-NEXT:  entry:
716 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
717 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
718 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -15873
719 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
720 // LE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
721 // LE-NEXT:    ret void
722 //
723 // BE-LABEL: @st4_check_store(
724 // BE-NEXT:  entry:
725 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
726 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
727 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -125
728 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 4
729 // BE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
730 // BE-NEXT:    ret void
731 //
732 // LENUMLOADS-LABEL: @st4_check_store(
733 // LENUMLOADS-NEXT:  entry:
734 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
735 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
736 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -15873
737 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
738 // LENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
739 // LENUMLOADS-NEXT:    ret void
740 //
741 // BENUMLOADS-LABEL: @st4_check_store(
742 // BENUMLOADS-NEXT:  entry:
743 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
744 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
745 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -125
746 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 4
747 // BENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
748 // BENUMLOADS-NEXT:    ret void
749 //
750 // LEWIDTH-LABEL: @st4_check_store(
751 // LEWIDTH-NEXT:  entry:
752 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
753 // LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
754 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
755 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
756 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
757 // LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP1]], align 1
758 // LEWIDTH-NEXT:    ret void
759 //
760 // BEWIDTH-LABEL: @st4_check_store(
761 // BEWIDTH-NEXT:  entry:
762 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
763 // BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
764 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
765 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
766 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
767 // BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP1]], align 1
768 // BEWIDTH-NEXT:    ret void
769 //
770 // LEWIDTHNUM-LABEL: @st4_check_store(
771 // LEWIDTHNUM-NEXT:  entry:
772 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
773 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
774 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
775 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
776 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
777 // LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP1]], align 1
778 // LEWIDTHNUM-NEXT:    ret void
779 //
780 // BEWIDTHNUM-LABEL: @st4_check_store(
781 // BEWIDTHNUM-NEXT:  entry:
782 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
783 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
784 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
785 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
786 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
787 // BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], i8* [[TMP1]], align 1
788 // BEWIDTHNUM-NEXT:    ret void
789 //
// Store 1 into the volatile field st4.c.  Mirrors st4_check_load: the
// read-modify-write uses a volatile i16 container without width honoring,
// and a single volatile i8 at byte offset 1 when the declared char width
// is honored (see the LEWIDTH/BEWIDTH checks above).
void st4_check_store(struct st4 *m) {
  m->c = 1;
}
793 
794 // LE-LABEL: @st4_check_nonv_store(
795 // LE-NEXT:  entry:
796 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
797 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
798 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
799 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
800 // LE-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
801 // LE-NEXT:    ret void
802 //
803 // BE-LABEL: @st4_check_nonv_store(
804 // BE-NEXT:  entry:
805 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
806 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
807 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
808 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
809 // BE-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
810 // BE-NEXT:    ret void
811 //
812 // LENUMLOADS-LABEL: @st4_check_nonv_store(
813 // LENUMLOADS-NEXT:  entry:
814 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
815 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
816 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
817 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
818 // LENUMLOADS-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
819 // LENUMLOADS-NEXT:    ret void
820 //
821 // BENUMLOADS-LABEL: @st4_check_nonv_store(
822 // BENUMLOADS-NEXT:  entry:
823 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
824 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
825 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
826 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
827 // BENUMLOADS-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
828 // BENUMLOADS-NEXT:    ret void
829 //
830 // LEWIDTH-LABEL: @st4_check_nonv_store(
831 // LEWIDTH-NEXT:  entry:
832 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
833 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
834 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
835 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
836 // LEWIDTH-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
837 // LEWIDTH-NEXT:    ret void
838 //
839 // BEWIDTH-LABEL: @st4_check_nonv_store(
840 // BEWIDTH-NEXT:  entry:
841 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
842 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
843 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
844 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
845 // BEWIDTH-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
846 // BEWIDTH-NEXT:    ret void
847 //
848 // LEWIDTHNUM-LABEL: @st4_check_nonv_store(
849 // LEWIDTHNUM-NEXT:  entry:
850 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
851 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
852 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
853 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
854 // LEWIDTHNUM-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
855 // LEWIDTHNUM-NEXT:    ret void
856 //
857 // BEWIDTHNUM-LABEL: @st4_check_nonv_store(
858 // BEWIDTHNUM-NEXT:  entry:
859 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
860 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
861 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
862 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
863 // BEWIDTHNUM-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
864 // BEWIDTHNUM-NEXT:    ret void
865 //
// Store 1 into the NON-volatile field st4.b.  All eight configurations
// above emit the same plain (non-volatile) i16 load/and/or/store sequence:
// the volatile qualifier on the neighbouring field c does not affect
// accesses to b.
void st4_check_nonv_store(struct st4 *m) {
  m->b = 1;
}
869 
struct st5 {
  int a : 12;          // 12-bit field in the leading storage unit
  volatile char c : 5; // volatile 5-bit field; the IR above addresses it as member 1 (its own i8)
};
874 
875 // LE-LABEL: @st5_check_load(
876 // LE-NEXT:  entry:
877 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
878 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
879 // LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
880 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
881 // LE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
882 // LE-NEXT:    ret i32 [[CONV]]
883 //
884 // BE-LABEL: @st5_check_load(
885 // BE-NEXT:  entry:
886 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
887 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
888 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
889 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
890 // BE-NEXT:    ret i32 [[CONV]]
891 //
892 // LENUMLOADS-LABEL: @st5_check_load(
893 // LENUMLOADS-NEXT:  entry:
894 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
895 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
896 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
897 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
898 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
899 // LENUMLOADS-NEXT:    ret i32 [[CONV]]
900 //
901 // BENUMLOADS-LABEL: @st5_check_load(
902 // BENUMLOADS-NEXT:  entry:
903 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
904 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
905 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
906 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
907 // BENUMLOADS-NEXT:    ret i32 [[CONV]]
908 //
909 // LEWIDTH-LABEL: @st5_check_load(
910 // LEWIDTH-NEXT:  entry:
911 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
912 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
913 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
914 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
915 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
916 // LEWIDTH-NEXT:    ret i32 [[CONV]]
917 //
918 // BEWIDTH-LABEL: @st5_check_load(
919 // BEWIDTH-NEXT:  entry:
920 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
921 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
922 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
923 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
924 // BEWIDTH-NEXT:    ret i32 [[CONV]]
925 //
926 // LEWIDTHNUM-LABEL: @st5_check_load(
927 // LEWIDTHNUM-NEXT:  entry:
928 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
929 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
930 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
931 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
932 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
933 // LEWIDTHNUM-NEXT:    ret i32 [[CONV]]
934 //
935 // BEWIDTHNUM-LABEL: @st5_check_load(
936 // BEWIDTHNUM-NEXT:  entry:
937 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
938 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
939 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
940 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
941 // BEWIDTHNUM-NEXT:    ret i32 [[CONV]]
942 //
// Read the volatile field st5.c.  Unlike st4, c occupies its own i8
// storage unit (GEP to member 1 in all checks above), so every
// configuration emits the same volatile i8 load + shift/sign-extend.
int st5_check_load(struct st5 *m) {
  return m->c;
}
946 
947 // LE-LABEL: @st5_check_store(
948 // LE-NEXT:  entry:
949 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
950 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
951 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
952 // LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
953 // LE-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
954 // LE-NEXT:    ret void
955 //
956 // BE-LABEL: @st5_check_store(
957 // BE-NEXT:  entry:
958 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
959 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
960 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
961 // BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
962 // BE-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
963 // BE-NEXT:    ret void
964 //
965 // LENUMLOADS-LABEL: @st5_check_store(
966 // LENUMLOADS-NEXT:  entry:
967 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
968 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
969 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
970 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
971 // LENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
972 // LENUMLOADS-NEXT:    ret void
973 //
974 // BENUMLOADS-LABEL: @st5_check_store(
975 // BENUMLOADS-NEXT:  entry:
976 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
977 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
978 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
979 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
980 // BENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
981 // BENUMLOADS-NEXT:    ret void
982 //
983 // LEWIDTH-LABEL: @st5_check_store(
984 // LEWIDTH-NEXT:  entry:
985 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
986 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
987 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
988 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
989 // LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
990 // LEWIDTH-NEXT:    ret void
991 //
992 // BEWIDTH-LABEL: @st5_check_store(
993 // BEWIDTH-NEXT:  entry:
994 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
995 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
996 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
997 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
998 // BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
999 // BEWIDTH-NEXT:    ret void
1000 //
1001 // LEWIDTHNUM-LABEL: @st5_check_store(
1002 // LEWIDTHNUM-NEXT:  entry:
1003 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
1004 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
1005 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1006 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
1007 // LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
1008 // LEWIDTHNUM-NEXT:    ret void
1009 //
1010 // BEWIDTHNUM-LABEL: @st5_check_store(
1011 // BEWIDTHNUM-NEXT:  entry:
1012 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
1013 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
1014 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1015 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
1016 // BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], i8* [[C]], align 2
1017 // BEWIDTHNUM-NEXT:    ret void
1018 //
// Store 1 into the volatile field st5.c.  As with st5_check_load, c sits
// in its own i8 unit, so all configurations emit the identical volatile
// i8 read-modify-write shown above.
void st5_check_store(struct st5 *m) {
  m->c = 1;
}
1022 
struct st6 {
  int a : 12; // bit-field in the leading i16 storage unit (per the IR above)
  char b;     // plain char member between the two bit-fields
  int c : 5;  // trailing 5-bit field, accessed as an i8 unit (member 2)
};
1028 
1029 // LE-LABEL: @st6_check_load(
1030 // LE-NEXT:  entry:
1031 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1032 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1033 // LE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
1034 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4
1035 // LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1036 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1037 // LE-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1038 // LE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1039 // LE-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1040 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1041 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1042 // LE-NEXT:    [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
1043 // LE-NEXT:    [[BF_ASHR3:%.*]] = ashr exact i8 [[BF_SHL2]], 3
1044 // LE-NEXT:    [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
1045 // LE-NEXT:    [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
1046 // LE-NEXT:    ret i32 [[ADD5]]
1047 //
1048 // BE-LABEL: @st6_check_load(
1049 // BE-NEXT:  entry:
1050 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1051 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1052 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
1053 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1054 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1055 // BE-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1056 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1057 // BE-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1058 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1059 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1060 // BE-NEXT:    [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1061 // BE-NEXT:    [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1062 // BE-NEXT:    [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1063 // BE-NEXT:    ret i32 [[ADD4]]
1064 //
1065 // LENUMLOADS-LABEL: @st6_check_load(
1066 // LENUMLOADS-NEXT:  entry:
1067 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1068 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1069 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
1070 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4
1071 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1072 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1073 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1074 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1075 // LENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1076 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1077 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1078 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
1079 // LENUMLOADS-NEXT:    [[BF_ASHR3:%.*]] = ashr exact i8 [[BF_SHL2]], 3
1080 // LENUMLOADS-NEXT:    [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
1081 // LENUMLOADS-NEXT:    [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
1082 // LENUMLOADS-NEXT:    ret i32 [[ADD5]]
1083 //
1084 // BENUMLOADS-LABEL: @st6_check_load(
1085 // BENUMLOADS-NEXT:  entry:
1086 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1087 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1088 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
1089 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1090 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1091 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1092 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1093 // BENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1094 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1095 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1096 // BENUMLOADS-NEXT:    [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1097 // BENUMLOADS-NEXT:    [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1098 // BENUMLOADS-NEXT:    [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1099 // BENUMLOADS-NEXT:    ret i32 [[ADD4]]
1100 //
1101 // LEWIDTH-LABEL: @st6_check_load(
1102 // LEWIDTH-NEXT:  entry:
1103 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1104 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1105 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
1106 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4
1107 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1108 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1109 // LEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1110 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1111 // LEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1112 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1113 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1114 // LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
1115 // LEWIDTH-NEXT:    [[BF_ASHR3:%.*]] = ashr exact i8 [[BF_SHL2]], 3
1116 // LEWIDTH-NEXT:    [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
1117 // LEWIDTH-NEXT:    [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
1118 // LEWIDTH-NEXT:    ret i32 [[ADD5]]
1119 //
1120 // BEWIDTH-LABEL: @st6_check_load(
1121 // BEWIDTH-NEXT:  entry:
1122 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1123 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1124 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
1125 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1126 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1127 // BEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1128 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1129 // BEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1130 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1131 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1132 // BEWIDTH-NEXT:    [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1133 // BEWIDTH-NEXT:    [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1134 // BEWIDTH-NEXT:    [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1135 // BEWIDTH-NEXT:    ret i32 [[ADD4]]
1136 //
1137 // LEWIDTHNUM-LABEL: @st6_check_load(
1138 // LEWIDTHNUM-NEXT:  entry:
1139 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1140 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1141 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
1142 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4
1143 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1144 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1145 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1146 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1147 // LEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1148 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1149 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1150 // LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
1151 // LEWIDTHNUM-NEXT:    [[BF_ASHR3:%.*]] = ashr exact i8 [[BF_SHL2]], 3
1152 // LEWIDTHNUM-NEXT:    [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
1153 // LEWIDTHNUM-NEXT:    [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
1154 // LEWIDTHNUM-NEXT:    ret i32 [[ADD5]]
1155 //
1156 // BEWIDTHNUM-LABEL: @st6_check_load(
1157 // BEWIDTHNUM-NEXT:  entry:
1158 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1159 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1160 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
1161 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1162 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1163 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3
1164 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1165 // BEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1166 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1167 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
1168 // BEWIDTHNUM-NEXT:    [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1169 // BEWIDTHNUM-NEXT:    [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1170 // BEWIDTHNUM-NEXT:    [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1171 // BEWIDTHNUM-NEXT:    ret i32 [[ADD4]]
1172 //
// Read all three members of st6 through a volatile pointer and sum them:
// the bitfield 'a', the plain member 'b', and the bitfield 'c'. The CHECK
// lines above verify the volatile bitfield load sequences for each
// combination of endianness and -faapcs-bitfield-{load,width} options.
int st6_check_load(volatile struct st6 *m) {
  int x = m->a;
  x += m->b;
  x += m->c;
  return x;
}
1179 
1180 // LE-LABEL: @st6_check_store(
1181 // LE-NEXT:  entry:
1182 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1183 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1184 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1185 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1186 // LE-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1187 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1188 // LE-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1189 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1190 // LE-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1191 // LE-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1192 // LE-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1193 // LE-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1194 // LE-NEXT:    ret void
1195 //
1196 // BE-LABEL: @st6_check_store(
1197 // BE-NEXT:  entry:
1198 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1199 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1200 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1201 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1202 // BE-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1203 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1204 // BE-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1205 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1206 // BE-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1207 // BE-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1208 // BE-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1209 // BE-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1210 // BE-NEXT:    ret void
1211 //
1212 // LENUMLOADS-LABEL: @st6_check_store(
1213 // LENUMLOADS-NEXT:  entry:
1214 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1215 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1216 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1217 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1218 // LENUMLOADS-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1219 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1220 // LENUMLOADS-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1221 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1222 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1223 // LENUMLOADS-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1224 // LENUMLOADS-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1225 // LENUMLOADS-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1226 // LENUMLOADS-NEXT:    ret void
1227 //
1228 // BENUMLOADS-LABEL: @st6_check_store(
1229 // BENUMLOADS-NEXT:  entry:
1230 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1231 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1232 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1233 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1234 // BENUMLOADS-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1235 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1236 // BENUMLOADS-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1237 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1238 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1239 // BENUMLOADS-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1240 // BENUMLOADS-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1241 // BENUMLOADS-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1242 // BENUMLOADS-NEXT:    ret void
1243 //
1244 // LEWIDTH-LABEL: @st6_check_store(
1245 // LEWIDTH-NEXT:  entry:
1246 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1247 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1248 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1249 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1250 // LEWIDTH-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1251 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1252 // LEWIDTH-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1253 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1254 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1255 // LEWIDTH-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1256 // LEWIDTH-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1257 // LEWIDTH-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1258 // LEWIDTH-NEXT:    ret void
1259 //
1260 // BEWIDTH-LABEL: @st6_check_store(
1261 // BEWIDTH-NEXT:  entry:
1262 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1263 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1264 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1265 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1266 // BEWIDTH-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1267 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1268 // BEWIDTH-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1269 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1270 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1271 // BEWIDTH-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1272 // BEWIDTH-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1273 // BEWIDTH-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1274 // BEWIDTH-NEXT:    ret void
1275 //
1276 // LEWIDTHNUM-LABEL: @st6_check_store(
1277 // LEWIDTHNUM-NEXT:  entry:
1278 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1279 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1280 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1281 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1282 // LEWIDTHNUM-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1283 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1284 // LEWIDTHNUM-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1285 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1286 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1287 // LEWIDTHNUM-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1288 // LEWIDTHNUM-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1289 // LEWIDTHNUM-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1290 // LEWIDTHNUM-NEXT:    ret void
1291 //
1292 // BEWIDTHNUM-LABEL: @st6_check_store(
1293 // BEWIDTHNUM-NEXT:  entry:
1294 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
1295 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
1296 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1297 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1298 // BEWIDTHNUM-NEXT:    store i16 [[BF_SET]], i16* [[TMP0]], align 4
1299 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
1300 // BEWIDTHNUM-NEXT:    store i8 2, i8* [[B]], align 2, !tbaa !3
1301 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
1302 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
1303 // BEWIDTHNUM-NEXT:    [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1304 // BEWIDTHNUM-NEXT:    [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1305 // BEWIDTHNUM-NEXT:    store i8 [[BF_SET3]], i8* [[C]], align 1
1306 // BEWIDTHNUM-NEXT:    ret void
1307 //
// Store constants into all three members of st6 (non-volatile pointer).
// The CHECK lines above verify the read-modify-write (load/and/or/store)
// sequences emitted for the bitfield members 'a' and 'c', and the plain
// byte store for 'b'.
void st6_check_store(struct st6 *m) {
  m->a = 1;
  m->b = 2;
  m->c = 3;
}
1313 
1314 // Nested structs and bitfields.
// Inner struct: a plain byte followed by a 5-bit signed bitfield.
struct st7a {
  char a;
  int b : 5;
};
1319 
// Outer struct embedding a volatile copy of st7a, so accesses to the
// nested bitfield 'y.b' must be volatile even through a non-volatile
// pointer to st7b.
struct st7b {
  char x;
  volatile struct st7a y;
};
1324 
1325 // LE-LABEL: @st7_check_load(
1326 // LE-NEXT:  entry:
1327 // LE-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1328 // LE-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1329 // LE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1330 // LE-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1331 // LE-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1332 // LE-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1333 // LE-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1334 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1335 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1336 // LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1337 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
1338 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1339 // LE-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1340 // LE-NEXT:    ret i32 [[ADD3]]
1341 //
1342 // BE-LABEL: @st7_check_load(
1343 // BE-NEXT:  entry:
1344 // BE-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1345 // BE-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1346 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1347 // BE-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1348 // BE-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1349 // BE-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1350 // BE-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1351 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1352 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1353 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1354 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1355 // BE-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1356 // BE-NEXT:    ret i32 [[ADD3]]
1357 //
1358 // LENUMLOADS-LABEL: @st7_check_load(
1359 // LENUMLOADS-NEXT:  entry:
1360 // LENUMLOADS-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1361 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1362 // LENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1363 // LENUMLOADS-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1364 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1365 // LENUMLOADS-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1366 // LENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1367 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1368 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1369 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1370 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
1371 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1372 // LENUMLOADS-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1373 // LENUMLOADS-NEXT:    ret i32 [[ADD3]]
1374 //
1375 // BENUMLOADS-LABEL: @st7_check_load(
1376 // BENUMLOADS-NEXT:  entry:
1377 // BENUMLOADS-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1378 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1379 // BENUMLOADS-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1380 // BENUMLOADS-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1381 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1382 // BENUMLOADS-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1383 // BENUMLOADS-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1384 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1385 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1386 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1387 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1388 // BENUMLOADS-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1389 // BENUMLOADS-NEXT:    ret i32 [[ADD3]]
1390 //
1391 // LEWIDTH-LABEL: @st7_check_load(
1392 // LEWIDTH-NEXT:  entry:
1393 // LEWIDTH-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1394 // LEWIDTH-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1395 // LEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1396 // LEWIDTH-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1397 // LEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1398 // LEWIDTH-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1399 // LEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1400 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1401 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1402 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1403 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
1404 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1405 // LEWIDTH-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1406 // LEWIDTH-NEXT:    ret i32 [[ADD3]]
1407 //
1408 // BEWIDTH-LABEL: @st7_check_load(
1409 // BEWIDTH-NEXT:  entry:
1410 // BEWIDTH-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1411 // BEWIDTH-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1412 // BEWIDTH-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1413 // BEWIDTH-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1414 // BEWIDTH-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1415 // BEWIDTH-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1416 // BEWIDTH-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1417 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1418 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1419 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1420 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1421 // BEWIDTH-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1422 // BEWIDTH-NEXT:    ret i32 [[ADD3]]
1423 //
1424 // LEWIDTHNUM-LABEL: @st7_check_load(
1425 // LEWIDTHNUM-NEXT:  entry:
1426 // LEWIDTHNUM-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1427 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1428 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1429 // LEWIDTHNUM-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1430 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1431 // LEWIDTHNUM-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1432 // LEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1433 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1434 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1435 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1436 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
1437 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1438 // LEWIDTHNUM-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1439 // LEWIDTHNUM-NEXT:    ret i32 [[ADD3]]
1440 //
1441 // BEWIDTHNUM-LABEL: @st7_check_load(
1442 // BEWIDTHNUM-NEXT:  entry:
1443 // BEWIDTHNUM-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1444 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8
1445 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1446 // BEWIDTHNUM-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1447 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11
1448 // BEWIDTHNUM-NEXT:    [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1449 // BEWIDTHNUM-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
1450 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1451 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1452 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1453 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1454 // BEWIDTHNUM-NEXT:    [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1455 // BEWIDTHNUM-NEXT:    ret i32 [[ADD3]]
1456 //
// Sum the outer member 'x' with the nested volatile members 'y.a' and
// 'y.b'. The CHECK lines above verify that loads of the nested members
// are volatile (including the bitfield shift/sext sequence for 'y.b')
// while the load of 'x' is not.
int st7_check_load(struct st7b *m) {
  int r = m->x;
  r += m->y.a;
  r += m->y.b;
  return r;
}
1463 
1464 // LE-LABEL: @st7_check_store(
1465 // LE-NEXT:  entry:
1466 // LE-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1467 // LE-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1468 // LE-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1469 // LE-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1470 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1471 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1472 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1473 // LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1474 // LE-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1475 // LE-NEXT:    ret void
1476 //
1477 // BE-LABEL: @st7_check_store(
1478 // BE-NEXT:  entry:
1479 // BE-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1480 // BE-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1481 // BE-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1482 // BE-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1483 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1484 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1485 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1486 // BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1487 // BE-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1488 // BE-NEXT:    ret void
1489 //
1490 // LENUMLOADS-LABEL: @st7_check_store(
1491 // LENUMLOADS-NEXT:  entry:
1492 // LENUMLOADS-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1493 // LENUMLOADS-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1494 // LENUMLOADS-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1495 // LENUMLOADS-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1496 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1497 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1498 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1499 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1500 // LENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1501 // LENUMLOADS-NEXT:    ret void
1502 //
1503 // BENUMLOADS-LABEL: @st7_check_store(
1504 // BENUMLOADS-NEXT:  entry:
1505 // BENUMLOADS-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1506 // BENUMLOADS-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1507 // BENUMLOADS-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1508 // BENUMLOADS-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1509 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1510 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1511 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1512 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1513 // BENUMLOADS-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1514 // BENUMLOADS-NEXT:    ret void
1515 //
1516 // LEWIDTH-LABEL: @st7_check_store(
1517 // LEWIDTH-NEXT:  entry:
1518 // LEWIDTH-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1519 // LEWIDTH-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1520 // LEWIDTH-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1521 // LEWIDTH-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1522 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1523 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1524 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1525 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1526 // LEWIDTH-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1527 // LEWIDTH-NEXT:    ret void
1528 //
1529 // BEWIDTH-LABEL: @st7_check_store(
1530 // BEWIDTH-NEXT:  entry:
1531 // BEWIDTH-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1532 // BEWIDTH-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1533 // BEWIDTH-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1534 // BEWIDTH-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1535 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1536 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1537 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1538 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1539 // BEWIDTH-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1540 // BEWIDTH-NEXT:    ret void
1541 //
1542 // LEWIDTHNUM-LABEL: @st7_check_store(
1543 // LEWIDTHNUM-NEXT:  entry:
1544 // LEWIDTHNUM-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1545 // LEWIDTHNUM-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1546 // LEWIDTHNUM-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1547 // LEWIDTHNUM-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1548 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1549 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1550 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1551 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1552 // LEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1553 // LEWIDTHNUM-NEXT:    ret void
1554 //
1555 // BEWIDTHNUM-LABEL: @st7_check_store(
1556 // BEWIDTHNUM-NEXT:  entry:
1557 // BEWIDTHNUM-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
1558 // BEWIDTHNUM-NEXT:    store i8 1, i8* [[X]], align 4, !tbaa !8
1559 // BEWIDTHNUM-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
1560 // BEWIDTHNUM-NEXT:    store volatile i8 2, i8* [[A]], align 4, !tbaa !11
1561 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
1562 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
1563 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1564 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1565 // BEWIDTHNUM-NEXT:    store volatile i8 [[BF_SET]], i8* [[B]], align 1
1566 // BEWIDTHNUM-NEXT:    ret void
1567 //
// Store into the outer member and both nested volatile members. The
// CHECK lines above verify a plain store for 'x', a volatile byte store
// for 'y.a', and a volatile load/and/or/store sequence for the nested
// bitfield 'y.b'.
void st7_check_store(struct st7b *m) {
  m->x = 1;
  m->y.a = 2;
  m->y.b = 3;
}
1573 
1574 // Check overflowing assignments to bitfields.
// A single 16-bit unsigned bitfield, used to check the value produced by
// an assignment expression that exactly fills the field.
struct st8 {
  unsigned f : 16;
};
1578 
1579 // LE-LABEL: @st8_check_assignment(
1580 // LE-NEXT:  entry:
1581 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1582 // LE-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1583 // LE-NEXT:    ret i32 65535
1584 //
1585 // BE-LABEL: @st8_check_assignment(
1586 // BE-NEXT:  entry:
1587 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1588 // BE-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1589 // BE-NEXT:    ret i32 65535
1590 //
1591 // LENUMLOADS-LABEL: @st8_check_assignment(
1592 // LENUMLOADS-NEXT:  entry:
1593 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1594 // LENUMLOADS-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1595 // LENUMLOADS-NEXT:    ret i32 65535
1596 //
1597 // BENUMLOADS-LABEL: @st8_check_assignment(
1598 // BENUMLOADS-NEXT:  entry:
1599 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1600 // BENUMLOADS-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1601 // BENUMLOADS-NEXT:    ret i32 65535
1602 //
1603 // LEWIDTH-LABEL: @st8_check_assignment(
1604 // LEWIDTH-NEXT:  entry:
1605 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1606 // LEWIDTH-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1607 // LEWIDTH-NEXT:    ret i32 65535
1608 //
1609 // BEWIDTH-LABEL: @st8_check_assignment(
1610 // BEWIDTH-NEXT:  entry:
1611 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1612 // BEWIDTH-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1613 // BEWIDTH-NEXT:    ret i32 65535
1614 //
1615 // LEWIDTHNUM-LABEL: @st8_check_assignment(
1616 // LEWIDTHNUM-NEXT:  entry:
1617 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1618 // LEWIDTHNUM-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1619 // LEWIDTHNUM-NEXT:    ret i32 65535
1620 //
1621 // BEWIDTHNUM-LABEL: @st8_check_assignment(
1622 // BEWIDTHNUM-NEXT:  entry:
1623 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
1624 // BEWIDTHNUM-NEXT:    store i16 -1, i16* [[TMP0]], align 4
1625 // BEWIDTHNUM-NEXT:    ret i32 65535
1626 //
// Assign 0xffff (the maximum 16-bit value) to the bitfield and return the
// result of the assignment expression. The CHECK lines above verify this
// folds to a single i16 store of -1 plus a constant return of 65535 for
// every configuration.
int st8_check_assignment(struct st8 *m) {
  return m->f = 0xffff;
}
1630 
// A single 8-bit signed bitfield; accessed through volatile pointers in
// the read_st9/store_st9 tests below.
struct st9{
  int f : 8;
};
1634 
1635 // LE-LABEL: @read_st9(
1636 // LE-NEXT:  entry:
1637 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1638 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1639 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1640 // LE-NEXT:    ret i32 [[BF_CAST]]
1641 //
1642 // BE-LABEL: @read_st9(
1643 // BE-NEXT:  entry:
1644 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1645 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1646 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1647 // BE-NEXT:    ret i32 [[BF_CAST]]
1648 //
1649 // LENUMLOADS-LABEL: @read_st9(
1650 // LENUMLOADS-NEXT:  entry:
1651 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1652 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1653 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1654 // LENUMLOADS-NEXT:    ret i32 [[BF_CAST]]
1655 //
1656 // BENUMLOADS-LABEL: @read_st9(
1657 // BENUMLOADS-NEXT:  entry:
1658 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1659 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1660 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1661 // BENUMLOADS-NEXT:    ret i32 [[BF_CAST]]
1662 //
1663 // LEWIDTH-LABEL: @read_st9(
1664 // LEWIDTH-NEXT:  entry:
1665 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1666 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1667 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
1668 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr exact i32 [[BF_SHL]], 24
1669 // LEWIDTH-NEXT:    ret i32 [[BF_ASHR]]
1670 //
1671 // BEWIDTH-LABEL: @read_st9(
1672 // BEWIDTH-NEXT:  entry:
1673 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1674 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1675 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
1676 // BEWIDTH-NEXT:    ret i32 [[BF_ASHR]]
1677 //
1678 // LEWIDTHNUM-LABEL: @read_st9(
1679 // LEWIDTHNUM-NEXT:  entry:
1680 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1681 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1682 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
1683 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr exact i32 [[BF_SHL]], 24
1684 // LEWIDTHNUM-NEXT:    ret i32 [[BF_ASHR]]
1685 //
1686 // BEWIDTHNUM-LABEL: @read_st9(
1687 // BEWIDTHNUM-NEXT:  entry:
1688 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1689 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1690 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
1691 // BEWIDTHNUM-NEXT:    ret i32 [[BF_ASHR]]
1692 //
// Volatile bit-field read of st9::f (an 8-bit field; see the CHECK lines
// above for the expected load width under each -faapcs-bitfield-* mode).
int read_st9(volatile struct st9 *m) {
  return m->f;
}
1696 
1697 // LE-LABEL: @store_st9(
1698 // LE-NEXT:  entry:
1699 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1700 // LE-NEXT:    store volatile i8 1, i8* [[TMP0]], align 4
1701 // LE-NEXT:    ret void
1702 //
1703 // BE-LABEL: @store_st9(
1704 // BE-NEXT:  entry:
1705 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1706 // BE-NEXT:    store volatile i8 1, i8* [[TMP0]], align 4
1707 // BE-NEXT:    ret void
1708 //
1709 // LENUMLOADS-LABEL: @store_st9(
1710 // LENUMLOADS-NEXT:  entry:
1711 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1712 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1713 // LENUMLOADS-NEXT:    store volatile i8 1, i8* [[TMP0]], align 4
1714 // LENUMLOADS-NEXT:    ret void
1715 //
1716 // BENUMLOADS-LABEL: @store_st9(
1717 // BENUMLOADS-NEXT:  entry:
1718 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1719 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1720 // BENUMLOADS-NEXT:    store volatile i8 1, i8* [[TMP0]], align 4
1721 // BENUMLOADS-NEXT:    ret void
1722 //
1723 // LEWIDTH-LABEL: @store_st9(
1724 // LEWIDTH-NEXT:  entry:
1725 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1726 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1727 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -256
1728 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 1
1729 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1730 // LEWIDTH-NEXT:    ret void
1731 //
1732 // BEWIDTH-LABEL: @store_st9(
1733 // BEWIDTH-NEXT:  entry:
1734 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1735 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1736 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], 16777215
1737 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 16777216
1738 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1739 // BEWIDTH-NEXT:    ret void
1740 //
1741 // LEWIDTHNUM-LABEL: @store_st9(
1742 // LEWIDTHNUM-NEXT:  entry:
1743 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1744 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1745 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -256
1746 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 1
1747 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1748 // LEWIDTHNUM-NEXT:    ret void
1749 //
1750 // BEWIDTHNUM-LABEL: @store_st9(
1751 // BEWIDTHNUM-NEXT:  entry:
1752 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1753 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1754 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], 16777215
1755 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 16777216
1756 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1757 // BEWIDTHNUM-NEXT:    ret void
1758 //
// Volatile bit-field write of the constant 1 into st9::f; the CHECK lines
// above pin whether an i8 store or an i32 read-modify-write is emitted.
void store_st9(volatile struct st9 *m) {
  m->f = 1;
}
1762 
1763 // LE-LABEL: @increment_st9(
1764 // LE-NEXT:  entry:
1765 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1766 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1767 // LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
1768 // LE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
1769 // LE-NEXT:    ret void
1770 //
1771 // BE-LABEL: @increment_st9(
1772 // BE-NEXT:  entry:
1773 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1774 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1775 // BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
1776 // BE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
1777 // BE-NEXT:    ret void
1778 //
1779 // LENUMLOADS-LABEL: @increment_st9(
1780 // LENUMLOADS-NEXT:  entry:
1781 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1782 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1783 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
1784 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1785 // LENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
1786 // LENUMLOADS-NEXT:    ret void
1787 //
1788 // BENUMLOADS-LABEL: @increment_st9(
1789 // BENUMLOADS-NEXT:  entry:
1790 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
1791 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1792 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
1793 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4
1794 // BENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
1795 // BENUMLOADS-NEXT:    ret void
1796 //
1797 // LEWIDTH-LABEL: @increment_st9(
1798 // LEWIDTH-NEXT:  entry:
1799 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1800 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1801 // LEWIDTH-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
1802 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1803 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 255
1804 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
1805 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
1806 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1807 // LEWIDTH-NEXT:    ret void
1808 //
1809 // BEWIDTH-LABEL: @increment_st9(
1810 // BEWIDTH-NEXT:  entry:
1811 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1812 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1813 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1814 // BEWIDTH-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
1815 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
1816 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
1817 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
1818 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1819 // BEWIDTH-NEXT:    ret void
1820 //
1821 // LEWIDTHNUM-LABEL: @increment_st9(
1822 // LEWIDTHNUM-NEXT:  entry:
1823 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1824 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1825 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
1826 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1827 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 255
1828 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
1829 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
1830 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1831 // LEWIDTHNUM-NEXT:    ret void
1832 //
1833 // BEWIDTHNUM-LABEL: @increment_st9(
1834 // BEWIDTHNUM-NEXT:  entry:
1835 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
1836 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1837 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1838 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
1839 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
1840 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
1841 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
1842 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1843 // BEWIDTHNUM-NEXT:    ret void
1844 //
// Volatile read-modify-write (pre-increment) of st9::f; exercises the
// extra load emitted under -faapcs-bitfield-load (see *NUMLOADS prefixes).
void increment_st9(volatile struct st9 *m) {
  ++m->f;
}
1848 
// Two adjacent int bit-fields; f is not byte-aligned (offset 1 bit), so
// accesses to f require shift/mask sequences in the generated IR.
struct st10{
  int e : 1;
  int f : 8;
};
1853 
1854 // LE-LABEL: @read_st10(
1855 // LE-NEXT:  entry:
1856 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1857 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1858 // LE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 7
1859 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1860 // LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1861 // LE-NEXT:    ret i32 [[BF_CAST]]
1862 //
1863 // BE-LABEL: @read_st10(
1864 // BE-NEXT:  entry:
1865 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1866 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1867 // BE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 1
1868 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1869 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1870 // BE-NEXT:    ret i32 [[BF_CAST]]
1871 //
1872 // LENUMLOADS-LABEL: @read_st10(
1873 // LENUMLOADS-NEXT:  entry:
1874 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1875 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1876 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 7
1877 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1878 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1879 // LENUMLOADS-NEXT:    ret i32 [[BF_CAST]]
1880 //
1881 // BENUMLOADS-LABEL: @read_st10(
1882 // BENUMLOADS-NEXT:  entry:
1883 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1884 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1885 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 1
1886 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1887 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1888 // BENUMLOADS-NEXT:    ret i32 [[BF_CAST]]
1889 //
1890 // LEWIDTH-LABEL: @read_st10(
1891 // LEWIDTH-NEXT:  entry:
1892 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1893 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1894 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 23
1895 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1896 // LEWIDTH-NEXT:    ret i32 [[BF_ASHR]]
1897 //
1898 // BEWIDTH-LABEL: @read_st10(
1899 // BEWIDTH-NEXT:  entry:
1900 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1901 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1902 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 1
1903 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1904 // BEWIDTH-NEXT:    ret i32 [[BF_ASHR]]
1905 //
1906 // LEWIDTHNUM-LABEL: @read_st10(
1907 // LEWIDTHNUM-NEXT:  entry:
1908 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1909 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1910 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 23
1911 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1912 // LEWIDTHNUM-NEXT:    ret i32 [[BF_ASHR]]
1913 //
1914 // BEWIDTHNUM-LABEL: @read_st10(
1915 // BEWIDTHNUM-NEXT:  entry:
1916 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1917 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1918 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 1
1919 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1920 // BEWIDTHNUM-NEXT:    ret i32 [[BF_ASHR]]
1921 //
// Volatile read of the misaligned 8-bit field st10::f; the CHECK lines
// above pin the shl/ashr extraction and the container width per mode.
int read_st10(volatile struct st10 *m) {
  return m->f;
}
1925 
1926 // LE-LABEL: @store_st10(
1927 // LE-NEXT:  entry:
1928 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1929 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1930 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -511
1931 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 2
1932 // LE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
1933 // LE-NEXT:    ret void
1934 //
1935 // BE-LABEL: @store_st10(
1936 // BE-NEXT:  entry:
1937 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1938 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1939 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -32641
1940 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
1941 // BE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
1942 // BE-NEXT:    ret void
1943 //
1944 // LENUMLOADS-LABEL: @store_st10(
1945 // LENUMLOADS-NEXT:  entry:
1946 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1947 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1948 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -511
1949 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 2
1950 // LENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
1951 // LENUMLOADS-NEXT:    ret void
1952 //
1953 // BENUMLOADS-LABEL: @store_st10(
1954 // BENUMLOADS-NEXT:  entry:
1955 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
1956 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
1957 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -32641
1958 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
1959 // BENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
1960 // BENUMLOADS-NEXT:    ret void
1961 //
1962 // LEWIDTH-LABEL: @store_st10(
1963 // LEWIDTH-NEXT:  entry:
1964 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1965 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1966 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -511
1967 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2
1968 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1969 // LEWIDTH-NEXT:    ret void
1970 //
1971 // BEWIDTH-LABEL: @store_st10(
1972 // BEWIDTH-NEXT:  entry:
1973 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1974 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1975 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2139095041
1976 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 8388608
1977 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1978 // BEWIDTH-NEXT:    ret void
1979 //
1980 // LEWIDTHNUM-LABEL: @store_st10(
1981 // LEWIDTHNUM-NEXT:  entry:
1982 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1983 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1984 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -511
1985 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2
1986 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1987 // LEWIDTHNUM-NEXT:    ret void
1988 //
1989 // BEWIDTHNUM-LABEL: @store_st10(
1990 // BEWIDTHNUM-NEXT:  entry:
1991 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
1992 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
1993 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2139095041
1994 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 8388608
1995 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
1996 // BEWIDTHNUM-NEXT:    ret void
1997 //
// Volatile write of 1 into st10::f; always a read-modify-write because f
// shares its container with e (see the and/or masks in the CHECK lines).
void store_st10(volatile struct st10 *m) {
  m->f = 1;
}
2001 
2002 // LE-LABEL: @increment_st10(
2003 // LE-NEXT:  entry:
2004 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
2005 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2006 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2007 // LE-NEXT:    [[TMP1:%.*]] = add i16 [[BF_LOAD]], 2
2008 // LE-NEXT:    [[BF_SHL2:%.*]] = and i16 [[TMP1]], 510
2009 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
2010 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
2011 // LE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
2012 // LE-NEXT:    ret void
2013 //
2014 // BE-LABEL: @increment_st10(
2015 // BE-NEXT:  entry:
2016 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
2017 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2018 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2019 // BE-NEXT:    [[TMP1:%.*]] = add i16 [[BF_LOAD]], 128
2020 // BE-NEXT:    [[BF_SHL2:%.*]] = and i16 [[TMP1]], 32640
2021 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
2022 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
2023 // BE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
2024 // BE-NEXT:    ret void
2025 //
2026 // LENUMLOADS-LABEL: @increment_st10(
2027 // LENUMLOADS-NEXT:  entry:
2028 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
2029 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2030 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2031 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = add i16 [[BF_LOAD]], 2
2032 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = and i16 [[TMP1]], 510
2033 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
2034 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
2035 // LENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
2036 // LENUMLOADS-NEXT:    ret void
2037 //
2038 // BENUMLOADS-LABEL: @increment_st10(
2039 // BENUMLOADS-NEXT:  entry:
2040 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
2041 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2042 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
2043 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = add i16 [[BF_LOAD]], 128
2044 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = and i16 [[TMP1]], 32640
2045 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
2046 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
2047 // BENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
2048 // BENUMLOADS-NEXT:    ret void
2049 //
2050 // LEWIDTH-LABEL: @increment_st10(
2051 // LEWIDTH-NEXT:  entry:
2052 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
2053 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2054 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2055 // LEWIDTH-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 2
2056 // LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 510
2057 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -511
2058 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2059 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2060 // LEWIDTH-NEXT:    ret void
2061 //
2062 // BEWIDTH-LABEL: @increment_st10(
2063 // BEWIDTH-NEXT:  entry:
2064 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
2065 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2066 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2067 // BEWIDTH-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 8388608
2068 // BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 2139095040
2069 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -2139095041
2070 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2071 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2072 // BEWIDTH-NEXT:    ret void
2073 //
2074 // LEWIDTHNUM-LABEL: @increment_st10(
2075 // LEWIDTHNUM-NEXT:  entry:
2076 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
2077 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2078 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2079 // LEWIDTHNUM-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 2
2080 // LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 510
2081 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -511
2082 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2083 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2084 // LEWIDTHNUM-NEXT:    ret void
2085 //
2086 // BEWIDTHNUM-LABEL: @increment_st10(
2087 // BEWIDTHNUM-NEXT:  entry:
2088 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
2089 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2090 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2091 // BEWIDTHNUM-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 8388608
2092 // BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 2139095040
2093 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -2139095041
2094 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2095 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2096 // BEWIDTHNUM-NEXT:    ret void
2097 //
// Volatile pre-increment of st10::f; the add constant in the CHECK lines
// is scaled by the field's bit offset within the container.
void increment_st10(volatile struct st10 *m) {
  ++m->f;
}
2101 
// A plain char member followed by a 16-bit int bit-field; per the CHECK
// lines above, f is accessed as an i16 at byte offset 1 (align 1).
struct st11{
  char e;
  int f : 16;
};
2106 
2107 // LE-LABEL: @read_st11(
2108 // LE-NEXT:  entry:
2109 // LE-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2110 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2111 // LE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2112 // LE-NEXT:    ret i32 [[BF_CAST]]
2113 //
2114 // BE-LABEL: @read_st11(
2115 // BE-NEXT:  entry:
2116 // BE-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2117 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2118 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2119 // BE-NEXT:    ret i32 [[BF_CAST]]
2120 //
2121 // LENUMLOADS-LABEL: @read_st11(
2122 // LENUMLOADS-NEXT:  entry:
2123 // LENUMLOADS-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2124 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2125 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2126 // LENUMLOADS-NEXT:    ret i32 [[BF_CAST]]
2127 //
2128 // BENUMLOADS-LABEL: @read_st11(
2129 // BENUMLOADS-NEXT:  entry:
2130 // BENUMLOADS-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2131 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2132 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2133 // BENUMLOADS-NEXT:    ret i32 [[BF_CAST]]
2134 //
2135 // LEWIDTH-LABEL: @read_st11(
2136 // LEWIDTH-NEXT:  entry:
2137 // LEWIDTH-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2138 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2139 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2140 // LEWIDTH-NEXT:    ret i32 [[BF_CAST]]
2141 //
2142 // BEWIDTH-LABEL: @read_st11(
2143 // BEWIDTH-NEXT:  entry:
2144 // BEWIDTH-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2145 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2146 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2147 // BEWIDTH-NEXT:    ret i32 [[BF_CAST]]
2148 //
2149 // LEWIDTHNUM-LABEL: @read_st11(
2150 // LEWIDTHNUM-NEXT:  entry:
2151 // LEWIDTHNUM-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2152 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2153 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2154 // LEWIDTHNUM-NEXT:    ret i32 [[BF_CAST]]
2155 //
2156 // BEWIDTHNUM-LABEL: @read_st11(
2157 // BEWIDTHNUM-NEXT:  entry:
2158 // BEWIDTHNUM-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2159 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2160 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2161 // BEWIDTHNUM-NEXT:    ret i32 [[BF_CAST]]
2162 //
// Volatile read of st11::f; fills its i16 container exactly, so the CHECK
// lines show a plain load + sext with no shift/mask in any mode.
int read_st11(volatile struct st11 *m) {
  return m->f;
}
2166 
2167 // LE-LABEL: @store_st11(
2168 // LE-NEXT:  entry:
2169 // LE-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2170 // LE-NEXT:    store volatile i16 1, i16* [[F]], align 1
2171 // LE-NEXT:    ret void
2172 //
2173 // BE-LABEL: @store_st11(
2174 // BE-NEXT:  entry:
2175 // BE-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2176 // BE-NEXT:    store volatile i16 1, i16* [[F]], align 1
2177 // BE-NEXT:    ret void
2178 //
2179 // LENUMLOADS-LABEL: @store_st11(
2180 // LENUMLOADS-NEXT:  entry:
2181 // LENUMLOADS-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2182 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2183 // LENUMLOADS-NEXT:    store volatile i16 1, i16* [[F]], align 1
2184 // LENUMLOADS-NEXT:    ret void
2185 //
2186 // BENUMLOADS-LABEL: @store_st11(
2187 // BENUMLOADS-NEXT:  entry:
2188 // BENUMLOADS-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2189 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2190 // BENUMLOADS-NEXT:    store volatile i16 1, i16* [[F]], align 1
2191 // BENUMLOADS-NEXT:    ret void
2192 //
2193 // LEWIDTH-LABEL: @store_st11(
2194 // LEWIDTH-NEXT:  entry:
2195 // LEWIDTH-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2196 // LEWIDTH-NEXT:    store volatile i16 1, i16* [[F]], align 1
2197 // LEWIDTH-NEXT:    ret void
2198 //
2199 // BEWIDTH-LABEL: @store_st11(
2200 // BEWIDTH-NEXT:  entry:
2201 // BEWIDTH-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2202 // BEWIDTH-NEXT:    store volatile i16 1, i16* [[F]], align 1
2203 // BEWIDTH-NEXT:    ret void
2204 //
2205 // LEWIDTHNUM-LABEL: @store_st11(
2206 // LEWIDTHNUM-NEXT:  entry:
2207 // LEWIDTHNUM-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2208 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2209 // LEWIDTHNUM-NEXT:    store volatile i16 1, i16* [[F]], align 1
2210 // LEWIDTHNUM-NEXT:    ret void
2211 //
2212 // BEWIDTHNUM-LABEL: @store_st11(
2213 // BEWIDTHNUM-NEXT:  entry:
2214 // BEWIDTHNUM-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2215 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2216 // BEWIDTHNUM-NEXT:    store volatile i16 1, i16* [[F]], align 1
2217 // BEWIDTHNUM-NEXT:    ret void
2218 //
// Volatile write of 1 into st11::f; a direct i16 store (no masking), with
// an extra preceding load only under -faapcs-bitfield-load.
void store_st11(volatile struct st11 *m) {
  m->f = 1;
}
2222 
2223 // LE-LABEL: @increment_st11(
2224 // LE-NEXT:  entry:
2225 // LE-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2226 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2227 // LE-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2228 // LE-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2229 // LE-NEXT:    ret void
2230 //
2231 // BE-LABEL: @increment_st11(
2232 // BE-NEXT:  entry:
2233 // BE-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2234 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2235 // BE-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2236 // BE-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2237 // BE-NEXT:    ret void
2238 //
2239 // LENUMLOADS-LABEL: @increment_st11(
2240 // LENUMLOADS-NEXT:  entry:
2241 // LENUMLOADS-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2242 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2243 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2244 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1
2245 // LENUMLOADS-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2246 // LENUMLOADS-NEXT:    ret void
2247 //
2248 // BENUMLOADS-LABEL: @increment_st11(
2249 // BENUMLOADS-NEXT:  entry:
2250 // BENUMLOADS-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2251 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2252 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2253 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1
2254 // BENUMLOADS-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2255 // BENUMLOADS-NEXT:    ret void
2256 //
2257 // LEWIDTH-LABEL: @increment_st11(
2258 // LEWIDTH-NEXT:  entry:
2259 // LEWIDTH-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2260 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2261 // LEWIDTH-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2262 // LEWIDTH-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2263 // LEWIDTH-NEXT:    ret void
2264 //
2265 // BEWIDTH-LABEL: @increment_st11(
2266 // BEWIDTH-NEXT:  entry:
2267 // BEWIDTH-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2268 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2269 // BEWIDTH-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2270 // BEWIDTH-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2271 // BEWIDTH-NEXT:    ret void
2272 //
2273 // LEWIDTHNUM-LABEL: @increment_st11(
2274 // LEWIDTHNUM-NEXT:  entry:
2275 // LEWIDTHNUM-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2276 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2277 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2278 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1
2279 // LEWIDTHNUM-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2280 // LEWIDTHNUM-NEXT:    ret void
2281 //
2282 // BEWIDTHNUM-LABEL: @increment_st11(
2283 // BEWIDTHNUM-NEXT:  entry:
2284 // BEWIDTHNUM-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
2285 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
2286 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i16 [[BF_LOAD]], 1
2287 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1
2288 // BEWIDTHNUM-NEXT:    store volatile i16 [[INC]], i16* [[F]], align 1
2289 // BEWIDTHNUM-NEXT:    ret void
2290 //
// Pre-increment the volatile bit-field st11::f. The CHECK lines above
// verify the lowering: a volatile i16 load/add/store at align 1, with the
// *NUMLOADS variants additionally checking the extra volatile load emitted
// under -faapcs-bitfield-load.
void increment_st11(volatile struct st11 *m) {
  ++m->f;
}
2294 
2295 // LE-LABEL: @increment_e_st11(
2296 // LE-NEXT:  entry:
2297 // LE-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2298 // LE-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2299 // LE-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2300 // LE-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2301 // LE-NEXT:    ret void
2302 //
2303 // BE-LABEL: @increment_e_st11(
2304 // BE-NEXT:  entry:
2305 // BE-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2306 // BE-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2307 // BE-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2308 // BE-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2309 // BE-NEXT:    ret void
2310 //
2311 // LENUMLOADS-LABEL: @increment_e_st11(
2312 // LENUMLOADS-NEXT:  entry:
2313 // LENUMLOADS-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2314 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2315 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2316 // LENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2317 // LENUMLOADS-NEXT:    ret void
2318 //
2319 // BENUMLOADS-LABEL: @increment_e_st11(
2320 // BENUMLOADS-NEXT:  entry:
2321 // BENUMLOADS-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2322 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2323 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2324 // BENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2325 // BENUMLOADS-NEXT:    ret void
2326 //
2327 // LEWIDTH-LABEL: @increment_e_st11(
2328 // LEWIDTH-NEXT:  entry:
2329 // LEWIDTH-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2330 // LEWIDTH-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2331 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2332 // LEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2333 // LEWIDTH-NEXT:    ret void
2334 //
2335 // BEWIDTH-LABEL: @increment_e_st11(
2336 // BEWIDTH-NEXT:  entry:
2337 // BEWIDTH-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2338 // BEWIDTH-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2339 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2340 // BEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2341 // BEWIDTH-NEXT:    ret void
2342 //
2343 // LEWIDTHNUM-LABEL: @increment_e_st11(
2344 // LEWIDTHNUM-NEXT:  entry:
2345 // LEWIDTHNUM-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2346 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2347 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2348 // LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2349 // LEWIDTHNUM-NEXT:    ret void
2350 //
2351 // BEWIDTHNUM-LABEL: @increment_e_st11(
2352 // BEWIDTHNUM-NEXT:  entry:
2353 // BEWIDTHNUM-NEXT:    [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
2354 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12
2355 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[TMP0]], 1
2356 // BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12
2357 // BEWIDTHNUM-NEXT:    ret void
2358 //
// Pre-increment the volatile member st11::e (not a bit-field access per the
// CHECK lines: a plain volatile i8 load/add/store at align 4 with TBAA).
void increment_e_st11(volatile struct st11 *m) {
  ++m->e;
}
2362 
// Two signed bit-fields sharing one 32-bit storage unit. Per the CHECK
// lines below, f is read with shl 8 / ashr 16 on an i32 container, i.e.
// e occupies the low 8 bits and f the next 16 on little-endian (mirrored
// on big-endian).
struct st12{
  int e : 8;   // 8-bit signed bit-field
  int f : 16;  // 16-bit signed bit-field
};
2367 
2368 // LE-LABEL: @read_st12(
2369 // LE-NEXT:  entry:
2370 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2371 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2372 // LE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2373 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2374 // LE-NEXT:    ret i32 [[BF_ASHR]]
2375 //
2376 // BE-LABEL: @read_st12(
2377 // BE-NEXT:  entry:
2378 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2379 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2380 // BE-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2381 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2382 // BE-NEXT:    ret i32 [[BF_ASHR]]
2383 //
2384 // LENUMLOADS-LABEL: @read_st12(
2385 // LENUMLOADS-NEXT:  entry:
2386 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2387 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2388 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2389 // LENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2390 // LENUMLOADS-NEXT:    ret i32 [[BF_ASHR]]
2391 //
2392 // BENUMLOADS-LABEL: @read_st12(
2393 // BENUMLOADS-NEXT:  entry:
2394 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2395 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2396 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2397 // BENUMLOADS-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2398 // BENUMLOADS-NEXT:    ret i32 [[BF_ASHR]]
2399 //
2400 // LEWIDTH-LABEL: @read_st12(
2401 // LEWIDTH-NEXT:  entry:
2402 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2403 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2404 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2405 // LEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2406 // LEWIDTH-NEXT:    ret i32 [[BF_ASHR]]
2407 //
2408 // BEWIDTH-LABEL: @read_st12(
2409 // BEWIDTH-NEXT:  entry:
2410 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2411 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2412 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2413 // BEWIDTH-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2414 // BEWIDTH-NEXT:    ret i32 [[BF_ASHR]]
2415 //
2416 // LEWIDTHNUM-LABEL: @read_st12(
2417 // LEWIDTHNUM-NEXT:  entry:
2418 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2419 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2420 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2421 // LEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2422 // LEWIDTHNUM-NEXT:    ret i32 [[BF_ASHR]]
2423 //
2424 // BEWIDTHNUM-LABEL: @read_st12(
2425 // BEWIDTHNUM-NEXT:  entry:
2426 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2427 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2428 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2429 // BEWIDTHNUM-NEXT:    [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2430 // BEWIDTHNUM-NEXT:    ret i32 [[BF_ASHR]]
2431 //
// Read the volatile bit-field st12::f. CHECK lines verify a single volatile
// i32 load followed by shl/ashr to sign-extend the 16-bit field; identical
// sequence for all eight RUN configurations since the container is the
// natural i32 unit.
int read_st12(volatile struct st12 *m) {
  return m->f;
}
2435 
2436 // LE-LABEL: @store_st12(
2437 // LE-NEXT:  entry:
2438 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2439 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2440 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2441 // LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2442 // LE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2443 // LE-NEXT:    ret void
2444 //
2445 // BE-LABEL: @store_st12(
2446 // BE-NEXT:  entry:
2447 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2448 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2449 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2450 // BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2451 // BE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2452 // BE-NEXT:    ret void
2453 //
2454 // LENUMLOADS-LABEL: @store_st12(
2455 // LENUMLOADS-NEXT:  entry:
2456 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2457 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2458 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2459 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2460 // LENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2461 // LENUMLOADS-NEXT:    ret void
2462 //
2463 // BENUMLOADS-LABEL: @store_st12(
2464 // BENUMLOADS-NEXT:  entry:
2465 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2466 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2467 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2468 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2469 // BENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2470 // BENUMLOADS-NEXT:    ret void
2471 //
2472 // LEWIDTH-LABEL: @store_st12(
2473 // LEWIDTH-NEXT:  entry:
2474 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2475 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2476 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2477 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2478 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2479 // LEWIDTH-NEXT:    ret void
2480 //
2481 // BEWIDTH-LABEL: @store_st12(
2482 // BEWIDTH-NEXT:  entry:
2483 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2484 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2485 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2486 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2487 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2488 // BEWIDTH-NEXT:    ret void
2489 //
2490 // LEWIDTHNUM-LABEL: @store_st12(
2491 // LEWIDTHNUM-NEXT:  entry:
2492 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2493 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2494 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2495 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2496 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2497 // LEWIDTHNUM-NEXT:    ret void
2498 //
2499 // BEWIDTHNUM-LABEL: @store_st12(
2500 // BEWIDTHNUM-NEXT:  entry:
2501 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2502 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2503 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2504 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2505 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2506 // BEWIDTHNUM-NEXT:    ret void
2507 //
// Store the constant 1 into the volatile bit-field st12::f. CHECK lines
// verify the read-modify-write: volatile i32 load, mask with -16776961
// (clear bits 8..23), or with 256 (1 << 8), volatile store.
void store_st12(volatile struct st12 *m) {
  m->f = 1;
}
2511 
2512 // LE-LABEL: @increment_st12(
2513 // LE-NEXT:  entry:
2514 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2515 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2516 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2517 // LE-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2518 // LE-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2519 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2520 // LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2521 // LE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2522 // LE-NEXT:    ret void
2523 //
2524 // BE-LABEL: @increment_st12(
2525 // BE-NEXT:  entry:
2526 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2527 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2528 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2529 // BE-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2530 // BE-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2531 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2532 // BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2533 // BE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2534 // BE-NEXT:    ret void
2535 //
2536 // LENUMLOADS-LABEL: @increment_st12(
2537 // LENUMLOADS-NEXT:  entry:
2538 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2539 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2540 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2541 // LENUMLOADS-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2542 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2543 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2544 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2545 // LENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2546 // LENUMLOADS-NEXT:    ret void
2547 //
2548 // BENUMLOADS-LABEL: @increment_st12(
2549 // BENUMLOADS-NEXT:  entry:
2550 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2551 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2552 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2553 // BENUMLOADS-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2554 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2555 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2556 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2557 // BENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2558 // BENUMLOADS-NEXT:    ret void
2559 //
2560 // LEWIDTH-LABEL: @increment_st12(
2561 // LEWIDTH-NEXT:  entry:
2562 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2563 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2564 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2565 // LEWIDTH-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2566 // LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2567 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2568 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2569 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2570 // LEWIDTH-NEXT:    ret void
2571 //
2572 // BEWIDTH-LABEL: @increment_st12(
2573 // BEWIDTH-NEXT:  entry:
2574 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2575 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2576 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2577 // BEWIDTH-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2578 // BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2579 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2580 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2581 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2582 // BEWIDTH-NEXT:    ret void
2583 //
2584 // LEWIDTHNUM-LABEL: @increment_st12(
2585 // LEWIDTHNUM-NEXT:  entry:
2586 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2587 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2588 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2589 // LEWIDTHNUM-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2590 // LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2591 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2592 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2593 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2594 // LEWIDTHNUM-NEXT:    ret void
2595 //
2596 // BEWIDTHNUM-LABEL: @increment_st12(
2597 // BEWIDTHNUM-NEXT:  entry:
2598 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2599 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2600 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2601 // BEWIDTHNUM-NEXT:    [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
2602 // BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
2603 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2604 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2605 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2606 // BEWIDTHNUM-NEXT:    ret void
2607 //
// Pre-increment the volatile bit-field st12::f. CHECK lines verify two
// volatile loads (one for the old value, one for the read-modify-write),
// an in-place add of 256 (1 in the field's bit position), masking, and a
// single volatile store.
void increment_st12(volatile struct st12 *m) {
  ++m->f;
}
2611 
2612 // LE-LABEL: @increment_e_st12(
2613 // LE-NEXT:  entry:
2614 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2615 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2616 // LE-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
2617 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2618 // LE-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2619 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2620 // LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2621 // LE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2622 // LE-NEXT:    ret void
2623 //
2624 // BE-LABEL: @increment_e_st12(
2625 // BE-NEXT:  entry:
2626 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2627 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2628 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2629 // BE-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
2630 // BE-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
2631 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2632 // BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2633 // BE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2634 // BE-NEXT:    ret void
2635 //
2636 // LENUMLOADS-LABEL: @increment_e_st12(
2637 // LENUMLOADS-NEXT:  entry:
2638 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2639 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2640 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
2641 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2642 // LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2643 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2644 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2645 // LENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2646 // LENUMLOADS-NEXT:    ret void
2647 //
2648 // BENUMLOADS-LABEL: @increment_e_st12(
2649 // BENUMLOADS-NEXT:  entry:
2650 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2651 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2652 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2653 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
2654 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
2655 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2656 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2657 // BENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2658 // BENUMLOADS-NEXT:    ret void
2659 //
2660 // LEWIDTH-LABEL: @increment_e_st12(
2661 // LEWIDTH-NEXT:  entry:
2662 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2663 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2664 // LEWIDTH-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
2665 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2666 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2667 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2668 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2669 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2670 // LEWIDTH-NEXT:    ret void
2671 //
2672 // BEWIDTH-LABEL: @increment_e_st12(
2673 // BEWIDTH-NEXT:  entry:
2674 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2675 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2676 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2677 // BEWIDTH-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
2678 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
2679 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2680 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2681 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2682 // BEWIDTH-NEXT:    ret void
2683 //
2684 // LEWIDTHNUM-LABEL: @increment_e_st12(
2685 // LEWIDTHNUM-NEXT:  entry:
2686 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2687 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2688 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
2689 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2690 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2691 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2692 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2693 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2694 // LEWIDTHNUM-NEXT:    ret void
2695 //
2696 // BEWIDTHNUM-LABEL: @increment_e_st12(
2697 // BEWIDTHNUM-NEXT:  entry:
2698 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
2699 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2700 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
2701 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
2702 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
2703 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2704 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2705 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
2706 // BEWIDTHNUM-NEXT:    ret void
2707 //
// Pre-increment the volatile bit-field st12::e. Note the LE/BE asymmetry in
// the CHECK lines: LE adds 1 and masks with 255 (field in the low byte),
// while BE adds 16777216 and masks with -16777216 (field in the high byte).
void increment_e_st12(volatile struct st12 *m) {
  ++m->e;
}
2711 
// Packed struct whose 32-bit bit-field b straddles the natural i32 unit
// (byte offset 1..4): accesses below are lowered to an i40 container at
// align 1 per the CHECK lines.
struct st13 {
  char a : 8;   // occupies the first byte
  int b : 32;   // full-width bit-field, unaligned due to packing
} __attribute__((packed));
2716 
2717 // LE-LABEL: @increment_b_st13(
2718 // LE-NEXT:  entry:
2719 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2720 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2721 // LE-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
2722 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
2723 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2724 // LE-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
2725 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2726 // LE-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
2727 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2728 // LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
2729 // LE-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2730 // LE-NEXT:    ret void
2731 //
2732 // BE-LABEL: @increment_b_st13(
2733 // BE-NEXT:  entry:
2734 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2735 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2736 // BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
2737 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2738 // BE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
2739 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2740 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2741 // BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
2742 // BE-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2743 // BE-NEXT:    ret void
2744 //
2745 // LENUMLOADS-LABEL: @increment_b_st13(
2746 // LENUMLOADS-NEXT:  entry:
2747 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2748 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2749 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
2750 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
2751 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2752 // LENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
2753 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2754 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
2755 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2756 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
2757 // LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2758 // LENUMLOADS-NEXT:    ret void
2759 //
2760 // BENUMLOADS-LABEL: @increment_b_st13(
2761 // BENUMLOADS-NEXT:  entry:
2762 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2763 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2764 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
2765 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2766 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
2767 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2768 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2769 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
2770 // BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2771 // BENUMLOADS-NEXT:    ret void
2772 //
2773 // LEWIDTH-LABEL: @increment_b_st13(
2774 // LEWIDTH-NEXT:  entry:
2775 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2776 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2777 // LEWIDTH-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
2778 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
2779 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2780 // LEWIDTH-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
2781 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2782 // LEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
2783 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2784 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
2785 // LEWIDTH-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2786 // LEWIDTH-NEXT:    ret void
2787 //
2788 // BEWIDTH-LABEL: @increment_b_st13(
2789 // BEWIDTH-NEXT:  entry:
2790 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2791 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2792 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
2793 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2794 // BEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
2795 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2796 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2797 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
2798 // BEWIDTH-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2799 // BEWIDTH-NEXT:    ret void
2800 //
2801 // LEWIDTHNUM-LABEL: @increment_b_st13(
2802 // LEWIDTHNUM-NEXT:  entry:
2803 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2804 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2805 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
2806 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
2807 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2808 // LEWIDTHNUM-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
2809 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2810 // LEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
2811 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2812 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
2813 // LEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2814 // LEWIDTHNUM-NEXT:    ret void
2815 //
2816 // BEWIDTHNUM-LABEL: @increment_b_st13(
2817 // BEWIDTHNUM-NEXT:  entry:
2818 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
2819 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2820 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
2821 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2822 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
2823 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
2824 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2825 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
2826 // BEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
2827 // BEWIDTHNUM-NEXT:    ret void
2828 //
// Post-increment the volatile bit-field st13::b. CHECK lines verify the i40
// wide access at align 1: LE shifts right by 8 before truncating to i32,
// BE truncates directly; both reload volatilely before the masked store.
void increment_b_st13(volatile struct st13 *s) {
  s->b++;
}
2832 
// Packed single-byte struct: one 8-bit char bit-field, so accesses are
// plain volatile i8 load/store at align 1 per the CHECK lines below.
struct st14 {
  char a : 8;   // fills the whole (only) byte of the struct
} __attribute__((packed));
2836 
2837 // LE-LABEL: @increment_a_st14(
2838 // LE-NEXT:  entry:
2839 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2840 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2841 // LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2842 // LE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2843 // LE-NEXT:    ret void
2844 //
2845 // BE-LABEL: @increment_a_st14(
2846 // BE-NEXT:  entry:
2847 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2848 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2849 // BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2850 // BE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2851 // BE-NEXT:    ret void
2852 //
2853 // LENUMLOADS-LABEL: @increment_a_st14(
2854 // LENUMLOADS-NEXT:  entry:
2855 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2856 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2857 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2858 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2859 // LENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2860 // LENUMLOADS-NEXT:    ret void
2861 //
2862 // BENUMLOADS-LABEL: @increment_a_st14(
2863 // BENUMLOADS-NEXT:  entry:
2864 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2865 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2866 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2867 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2868 // BENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2869 // BENUMLOADS-NEXT:    ret void
2870 //
2871 // LEWIDTH-LABEL: @increment_a_st14(
2872 // LEWIDTH-NEXT:  entry:
2873 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2874 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2875 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2876 // LEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2877 // LEWIDTH-NEXT:    ret void
2878 //
2879 // BEWIDTH-LABEL: @increment_a_st14(
2880 // BEWIDTH-NEXT:  entry:
2881 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2882 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2883 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2884 // BEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2885 // BEWIDTH-NEXT:    ret void
2886 //
2887 // LEWIDTHNUM-LABEL: @increment_a_st14(
2888 // LEWIDTHNUM-NEXT:  entry:
2889 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2890 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2891 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2892 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2893 // LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2894 // LEWIDTHNUM-NEXT:    ret void
2895 //
2896 // BEWIDTHNUM-LABEL: @increment_a_st14(
2897 // BEWIDTHNUM-NEXT:  entry:
2898 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
2899 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2900 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2901 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2902 // BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2903 // BEWIDTHNUM-NEXT:    ret void
2904 //
// Increments the byte-sized bit-field through a volatile pointer; per the
// CHECK lines above, all configurations emit a volatile i8 read-modify-write
// (the *NUMLOADS variants add the extra volatile reload mandated by
// -faapcs-bitfield-load).
void increment_a_st14(volatile struct st14 *s) {
  s->a++;
}
2908 
// Like st14 but with a short-typed 8-bit bit-field in a packed struct;
// the CHECK lines below show it is still accessed as a single i8, i.e. the
// declared type does not widen the access here.
struct st15 {
  short a : 8;
} __attribute__((packed));
2912 
2913 // LE-LABEL: @increment_a_st15(
2914 // LE-NEXT:  entry:
2915 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2916 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2917 // LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2918 // LE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2919 // LE-NEXT:    ret void
2920 //
2921 // BE-LABEL: @increment_a_st15(
2922 // BE-NEXT:  entry:
2923 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2924 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2925 // BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2926 // BE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2927 // BE-NEXT:    ret void
2928 //
2929 // LENUMLOADS-LABEL: @increment_a_st15(
2930 // LENUMLOADS-NEXT:  entry:
2931 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2932 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2933 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2934 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2935 // LENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2936 // LENUMLOADS-NEXT:    ret void
2937 //
2938 // BENUMLOADS-LABEL: @increment_a_st15(
2939 // BENUMLOADS-NEXT:  entry:
2940 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2941 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2942 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2943 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2944 // BENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2945 // BENUMLOADS-NEXT:    ret void
2946 //
2947 // LEWIDTH-LABEL: @increment_a_st15(
2948 // LEWIDTH-NEXT:  entry:
2949 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2950 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2951 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2952 // LEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2953 // LEWIDTH-NEXT:    ret void
2954 //
2955 // BEWIDTH-LABEL: @increment_a_st15(
2956 // BEWIDTH-NEXT:  entry:
2957 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2958 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2959 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2960 // BEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2961 // BEWIDTH-NEXT:    ret void
2962 //
2963 // LEWIDTHNUM-LABEL: @increment_a_st15(
2964 // LEWIDTHNUM-NEXT:  entry:
2965 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2966 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2967 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2968 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2969 // LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2970 // LEWIDTHNUM-NEXT:    ret void
2971 //
2972 // BEWIDTHNUM-LABEL: @increment_a_st15(
2973 // BEWIDTHNUM-NEXT:  entry:
2974 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
2975 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2976 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2977 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
2978 // BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
2979 // BEWIDTHNUM-NEXT:    ret void
2980 //
// Volatile increment of st15's 8-bit field; expected IR (CHECK lines above)
// matches st14's: a volatile i8 load, add, and store, with an extra reload
// only under -faapcs-bitfield-load (*NUMLOADS prefixes).
void increment_a_st15(volatile struct st15 *s) {
  s->a++;
}
2984 
// Non-packed, non-volatile struct of int bit-fields. Per the CHECK lines
// below, a/b are placed in one storage unit accessed as i64 (align 4), and
// c/d in a second unit (declared member type i48, also accessed as i64).
struct st16 {
  int a : 32;
  int b : 16;
  int c : 32;
  int d : 16;
};
2991 
2992 // LE-LABEL: @increment_a_st16(
2993 // LE-NEXT:  entry:
2994 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
2995 // LE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
2996 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
2997 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2998 // LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
2999 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3000 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3001 // LE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3002 // LE-NEXT:    ret void
3003 //
3004 // BE-LABEL: @increment_a_st16(
3005 // BE-NEXT:  entry:
3006 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3007 // BE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3008 // BE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3009 // BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3010 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3011 // BE-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3012 // BE-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3013 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3014 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3015 // BE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3016 // BE-NEXT:    ret void
3017 //
3018 // LENUMLOADS-LABEL: @increment_a_st16(
3019 // LENUMLOADS-NEXT:  entry:
3020 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3021 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3022 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3023 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3024 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3025 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3026 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3027 // LENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3028 // LENUMLOADS-NEXT:    ret void
3029 //
3030 // BENUMLOADS-LABEL: @increment_a_st16(
3031 // BENUMLOADS-NEXT:  entry:
3032 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3033 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3034 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3035 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3036 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3037 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3038 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3039 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3040 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3041 // BENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3042 // BENUMLOADS-NEXT:    ret void
3043 //
3044 // LEWIDTH-LABEL: @increment_a_st16(
3045 // LEWIDTH-NEXT:  entry:
3046 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3047 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3048 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3049 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3050 // LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3051 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3052 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3053 // LEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3054 // LEWIDTH-NEXT:    ret void
3055 //
3056 // BEWIDTH-LABEL: @increment_a_st16(
3057 // BEWIDTH-NEXT:  entry:
3058 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3059 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3060 // BEWIDTH-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3061 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3062 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3063 // BEWIDTH-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3064 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3065 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3066 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3067 // BEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3068 // BEWIDTH-NEXT:    ret void
3069 //
3070 // LEWIDTHNUM-LABEL: @increment_a_st16(
3071 // LEWIDTHNUM-NEXT:  entry:
3072 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3073 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3074 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3075 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3076 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3077 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3078 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3079 // LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3080 // LEWIDTHNUM-NEXT:    ret void
3081 //
3082 // BEWIDTHNUM-LABEL: @increment_a_st16(
3083 // BEWIDTHNUM-NEXT:  entry:
3084 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3085 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3086 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3087 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3088 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3089 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3090 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3091 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3092 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3093 // BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3094 // BEWIDTHNUM-NEXT:    ret void
3095 //
// Increments the full-width 32-bit field 'a'. CHECK lines above: the i64
// unit is loaded once, the field's 32 bits are updated (low half on LE,
// high half on BE), and the merged value is stored back; no extra load is
// added even under -faapcs-bitfield-load since the struct is not volatile.
void increment_a_st16(struct st16 *s) {
  s->a++;
}
3099 
3100 // LE-LABEL: @increment_b_st16(
3101 // LE-NEXT:  entry:
3102 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3103 // LE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3104 // LE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3105 // LE-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3106 // LE-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3107 // LE-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3108 // LE-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3109 // LE-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3110 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3111 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3112 // LE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3113 // LE-NEXT:    ret void
3114 //
3115 // BE-LABEL: @increment_b_st16(
3116 // BE-NEXT:  entry:
3117 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3118 // BE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3119 // BE-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3120 // BE-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3121 // BE-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3122 // BE-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3123 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3124 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3125 // BE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3126 // BE-NEXT:    ret void
3127 //
3128 // LENUMLOADS-LABEL: @increment_b_st16(
3129 // LENUMLOADS-NEXT:  entry:
3130 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3131 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3132 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3133 // LENUMLOADS-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3134 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3135 // LENUMLOADS-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3136 // LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3137 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3138 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3139 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3140 // LENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3141 // LENUMLOADS-NEXT:    ret void
3142 //
3143 // BENUMLOADS-LABEL: @increment_b_st16(
3144 // BENUMLOADS-NEXT:  entry:
3145 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3146 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3147 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3148 // BENUMLOADS-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3149 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3150 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3151 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3152 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3153 // BENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3154 // BENUMLOADS-NEXT:    ret void
3155 //
3156 // LEWIDTH-LABEL: @increment_b_st16(
3157 // LEWIDTH-NEXT:  entry:
3158 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3159 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3160 // LEWIDTH-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3161 // LEWIDTH-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3162 // LEWIDTH-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3163 // LEWIDTH-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3164 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3165 // LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3166 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3167 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3168 // LEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3169 // LEWIDTH-NEXT:    ret void
3170 //
3171 // BEWIDTH-LABEL: @increment_b_st16(
3172 // BEWIDTH-NEXT:  entry:
3173 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3174 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3175 // BEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3176 // BEWIDTH-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3177 // BEWIDTH-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3178 // BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3179 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3180 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3181 // BEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3182 // BEWIDTH-NEXT:    ret void
3183 //
3184 // LEWIDTHNUM-LABEL: @increment_b_st16(
3185 // LEWIDTHNUM-NEXT:  entry:
3186 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3187 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3188 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3189 // LEWIDTHNUM-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3190 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3191 // LEWIDTHNUM-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3192 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3193 // LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3194 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3195 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3196 // LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3197 // LEWIDTHNUM-NEXT:    ret void
3198 //
3199 // BEWIDTHNUM-LABEL: @increment_b_st16(
3200 // BEWIDTHNUM-NEXT:  entry:
3201 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3202 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3203 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3204 // BEWIDTHNUM-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3205 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3206 // BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3207 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3208 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3209 // BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3210 // BEWIDTHNUM-NEXT:    ret void
3211 //
// Increments the 16-bit field 'b' that shares an i64 storage unit with 'a'.
// CHECK lines above show the narrow field requiring an explicit mask
// (and 65535 / -65536) before being merged back into the unit.
void increment_b_st16(struct st16 *s) {
  s->b++;
}
3215 
3216 // LE-LABEL: @increment_c_st16(
3217 // LE-NEXT:  entry:
3218 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3219 // LE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3220 // LE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3221 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3222 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3223 // LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3224 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3225 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3226 // LE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3227 // LE-NEXT:    ret void
3228 //
3229 // BE-LABEL: @increment_c_st16(
3230 // BE-NEXT:  entry:
3231 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3232 // BE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3233 // BE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3234 // BE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3235 // BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3236 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3237 // BE-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3238 // BE-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3239 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3240 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3241 // BE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3242 // BE-NEXT:    ret void
3243 //
3244 // LENUMLOADS-LABEL: @increment_c_st16(
3245 // LENUMLOADS-NEXT:  entry:
3246 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3247 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3248 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3249 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3250 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3251 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3252 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3253 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3254 // LENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3255 // LENUMLOADS-NEXT:    ret void
3256 //
3257 // BENUMLOADS-LABEL: @increment_c_st16(
3258 // BENUMLOADS-NEXT:  entry:
3259 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3260 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3261 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3262 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3263 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3264 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3265 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3266 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3267 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3268 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3269 // BENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3270 // BENUMLOADS-NEXT:    ret void
3271 //
3272 // LEWIDTH-LABEL: @increment_c_st16(
3273 // LEWIDTH-NEXT:  entry:
3274 // LEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3275 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3276 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3277 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3278 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3279 // LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3280 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3281 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3282 // LEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3283 // LEWIDTH-NEXT:    ret void
3284 //
3285 // BEWIDTH-LABEL: @increment_c_st16(
3286 // BEWIDTH-NEXT:  entry:
3287 // BEWIDTH-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3288 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3289 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3290 // BEWIDTH-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3291 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3292 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3293 // BEWIDTH-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3294 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3295 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3296 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3297 // BEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3298 // BEWIDTH-NEXT:    ret void
3299 //
3300 // LEWIDTHNUM-LABEL: @increment_c_st16(
3301 // LEWIDTHNUM-NEXT:  entry:
3302 // LEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3303 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3304 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3305 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3306 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3307 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3308 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
3309 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3310 // LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3311 // LEWIDTHNUM-NEXT:    ret void
3312 //
3313 // BEWIDTHNUM-LABEL: @increment_c_st16(
3314 // BEWIDTHNUM-NEXT:  entry:
3315 // BEWIDTHNUM-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3316 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3317 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3318 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3319 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3320 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3321 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3322 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3323 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
3324 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3325 // BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3326 // BEWIDTHNUM-NEXT:    ret void
3327 //
// Increments 32-bit field 'c', which lives in the struct's second storage
// unit (member index 1, declared i48 but accessed as i64 per the CHECK
// lines above); IR shape mirrors increment_a_st16 on the second unit.
void increment_c_st16(struct st16 *s) {
  s->c++;
}
3331 
3332 // LE-LABEL: @increment_d_st16(
3333 // LE-NEXT:  entry:
3334 // LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3335 // LE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3336 // LE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3337 // LE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3338 // LE-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3339 // LE-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3340 // LE-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3341 // LE-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3342 // LE-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3343 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3344 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3345 // LE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3346 // LE-NEXT:    ret void
3347 //
3348 // BE-LABEL: @increment_d_st16(
3349 // BE-NEXT:  entry:
3350 // BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3351 // BE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3352 // BE-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3353 // BE-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3354 // BE-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3355 // BE-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3356 // BE-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3357 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3358 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3359 // BE-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3360 // BE-NEXT:    ret void
3361 //
3362 // LENUMLOADS-LABEL: @increment_d_st16(
3363 // LENUMLOADS-NEXT:  entry:
3364 // LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3365 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3366 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3367 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3368 // LENUMLOADS-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3369 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3370 // LENUMLOADS-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3371 // LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3372 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3373 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3374 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3375 // LENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3376 // LENUMLOADS-NEXT:    ret void
3377 //
3378 // BENUMLOADS-LABEL: @increment_d_st16(
3379 // BENUMLOADS-NEXT:  entry:
3380 // BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3381 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3382 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3383 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3384 // BENUMLOADS-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3385 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3386 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3387 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3388 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3389 // BENUMLOADS-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3390 // BENUMLOADS-NEXT:    ret void
3391 //
3392 // LEWIDTH-LABEL: @increment_d_st16(
3393 // LEWIDTH-NEXT:  entry:
3394 // LEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3395 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3396 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3397 // LEWIDTH-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3398 // LEWIDTH-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3399 // LEWIDTH-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3400 // LEWIDTH-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3401 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3402 // LEWIDTH-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3403 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3404 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3405 // LEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3406 // LEWIDTH-NEXT:    ret void
3407 //
3408 // BEWIDTH-LABEL: @increment_d_st16(
3409 // BEWIDTH-NEXT:  entry:
3410 // BEWIDTH-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3411 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3412 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3413 // BEWIDTH-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3414 // BEWIDTH-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3415 // BEWIDTH-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3416 // BEWIDTH-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3417 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3418 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3419 // BEWIDTH-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3420 // BEWIDTH-NEXT:    ret void
3421 //
3422 // LEWIDTHNUM-LABEL: @increment_d_st16(
3423 // LEWIDTHNUM-NEXT:  entry:
3424 // LEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3425 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3426 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3427 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3428 // LEWIDTHNUM-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3429 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3430 // LEWIDTHNUM-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3431 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3432 // LEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3433 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
3434 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3435 // LEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3436 // LEWIDTHNUM-NEXT:    ret void
3437 //
3438 // BEWIDTHNUM-LABEL: @increment_d_st16(
3439 // BEWIDTHNUM-NEXT:  entry:
3440 // BEWIDTHNUM-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3441 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3442 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
3443 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3444 // BEWIDTHNUM-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3445 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3446 // BEWIDTHNUM-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3447 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
3448 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3449 // BEWIDTHNUM-NEXT:    store i64 [[BF_SET]], i64* [[TMP0]], align 4
3450 // BEWIDTHNUM-NEXT:    ret void
3451 //
// Post-increments the 'd' bit-field through a non-volatile pointer.
// From the CHECK lines above: 'd' is a 16-bit field at bit offset 32 of a
// 64-bit storage unit (mask 65535 / clear -281470681743361), accessed as a
// single i64 load/modify/store in the no-width-limit modes and as an i32
// unit in the -faapcs-bitfield-width modes.
void increment_d_st16(struct st16 *s) {
  s->d++;
}
3455 
3456 // LE-LABEL: @increment_v_a_st16(
3457 // LE-NEXT:  entry:
3458 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3459 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3460 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3461 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3462 // LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3463 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3464 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3465 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3466 // LE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3467 // LE-NEXT:    ret void
3468 //
3469 // BE-LABEL: @increment_v_a_st16(
3470 // BE-NEXT:  entry:
3471 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3472 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3473 // BE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3474 // BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3475 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3476 // BE-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3477 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3478 // BE-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3479 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3480 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3481 // BE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3482 // BE-NEXT:    ret void
3483 //
3484 // LENUMLOADS-LABEL: @increment_v_a_st16(
3485 // LENUMLOADS-NEXT:  entry:
3486 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3487 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3488 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3489 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3490 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3491 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3492 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3493 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3494 // LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3495 // LENUMLOADS-NEXT:    ret void
3496 //
3497 // BENUMLOADS-LABEL: @increment_v_a_st16(
3498 // BENUMLOADS-NEXT:  entry:
3499 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3500 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3501 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3502 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3503 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3504 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3505 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3506 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3507 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3508 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3509 // BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3510 // BENUMLOADS-NEXT:    ret void
3511 //
3512 // LEWIDTH-LABEL: @increment_v_a_st16(
3513 // LEWIDTH-NEXT:  entry:
3514 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3515 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
3516 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3517 // LEWIDTH-NEXT:    store volatile i32 [[INC]], i32* [[TMP0]], align 4
3518 // LEWIDTH-NEXT:    ret void
3519 //
3520 // BEWIDTH-LABEL: @increment_v_a_st16(
3521 // BEWIDTH-NEXT:  entry:
3522 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3523 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
3524 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3525 // BEWIDTH-NEXT:    store volatile i32 [[INC]], i32* [[TMP0]], align 4
3526 // BEWIDTH-NEXT:    ret void
3527 //
3528 // LEWIDTHNUM-LABEL: @increment_v_a_st16(
3529 // LEWIDTHNUM-NEXT:  entry:
3530 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3531 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
3532 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3533 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
3534 // LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], i32* [[TMP0]], align 4
3535 // LEWIDTHNUM-NEXT:    ret void
3536 //
3537 // BEWIDTHNUM-LABEL: @increment_v_a_st16(
3538 // BEWIDTHNUM-NEXT:  entry:
3539 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3540 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
3541 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3542 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
3543 // BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], i32* [[TMP0]], align 4
3544 // BEWIDTHNUM-NEXT:    ret void
3545 //
// Post-increments the 'a' bit-field through a volatile pointer.
// Per the CHECK lines above: 'a' is a full 32-bit field at offset 0; the
// AAPCS width-limited modes (LEWIDTH/BEWIDTH) access it as a plain volatile
// i32, while the default modes use a volatile i64 load/modify/store pair.
void increment_v_a_st16(volatile struct st16 *s) {
  s->a++;
}
3549 
3550 // LE-LABEL: @increment_v_b_st16(
3551 // LE-NEXT:  entry:
3552 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3553 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3554 // LE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3555 // LE-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3556 // LE-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3557 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3558 // LE-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3559 // LE-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3560 // LE-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3561 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3562 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3563 // LE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3564 // LE-NEXT:    ret void
3565 //
3566 // BE-LABEL: @increment_v_b_st16(
3567 // BE-NEXT:  entry:
3568 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3569 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3570 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3571 // BE-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3572 // BE-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3573 // BE-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3574 // BE-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3575 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3576 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3577 // BE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3578 // BE-NEXT:    ret void
3579 //
3580 // LENUMLOADS-LABEL: @increment_v_b_st16(
3581 // LENUMLOADS-NEXT:  entry:
3582 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3583 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3584 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3585 // LENUMLOADS-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3586 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3587 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3588 // LENUMLOADS-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3589 // LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3590 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3591 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3592 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3593 // LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3594 // LENUMLOADS-NEXT:    ret void
3595 //
3596 // BENUMLOADS-LABEL: @increment_v_b_st16(
3597 // BENUMLOADS-NEXT:  entry:
3598 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
3599 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3600 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3601 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3602 // BENUMLOADS-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3603 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3604 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3605 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3606 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3607 // BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3608 // BENUMLOADS-NEXT:    ret void
3609 //
3610 // LEWIDTH-LABEL: @increment_v_b_st16(
3611 // LEWIDTH-NEXT:  entry:
3612 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3613 // LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 1
3614 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3615 // LEWIDTH-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
3616 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3617 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3618 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
3619 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
3620 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3621 // LEWIDTH-NEXT:    ret void
3622 //
3623 // BEWIDTH-LABEL: @increment_v_b_st16(
3624 // BEWIDTH-NEXT:  entry:
3625 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3626 // BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 1
3627 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3628 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3629 // BEWIDTH-NEXT:    [[TMP2:%.*]] = add i32 [[BF_LOAD]], 65536
3630 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP2]], -65536
3631 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
3632 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
3633 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3634 // BEWIDTH-NEXT:    ret void
3635 //
3636 // LEWIDTHNUM-LABEL: @increment_v_b_st16(
3637 // LEWIDTHNUM-NEXT:  entry:
3638 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3639 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 1
3640 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3641 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
3642 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3643 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3644 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
3645 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
3646 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3647 // LEWIDTHNUM-NEXT:    ret void
3648 //
3649 // BEWIDTHNUM-LABEL: @increment_v_b_st16(
3650 // BEWIDTHNUM-NEXT:  entry:
3651 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3652 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 1
3653 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3654 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3655 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = add i32 [[BF_LOAD]], 65536
3656 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP2]], -65536
3657 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
3658 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
3659 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3660 // BEWIDTHNUM-NEXT:    ret void
3661 //
// Post-increments the 'b' bit-field through a volatile pointer.
// Per the CHECK lines above: 'b' is a 16-bit field at bit offset 32; the
// volatile qualification forces a second BF_LOAD1 before the masked store,
// and the width-limited modes narrow the access to the containing i32 word
// (GEP index 1) instead of the full i64 unit.
void increment_v_b_st16(volatile struct st16 *s) {
  s->b++;
}
3665 
3666 // LE-LABEL: @increment_v_c_st16(
3667 // LE-NEXT:  entry:
3668 // LE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3669 // LE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3670 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3671 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3672 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3673 // LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3674 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3675 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3676 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3677 // LE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3678 // LE-NEXT:    ret void
3679 //
3680 // BE-LABEL: @increment_v_c_st16(
3681 // BE-NEXT:  entry:
3682 // BE-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3683 // BE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3684 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3685 // BE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3686 // BE-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3687 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3688 // BE-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3689 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3690 // BE-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3691 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3692 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3693 // BE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3694 // BE-NEXT:    ret void
3695 //
3696 // LENUMLOADS-LABEL: @increment_v_c_st16(
3697 // LENUMLOADS-NEXT:  entry:
3698 // LENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3699 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3700 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3701 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
3702 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3703 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i64
3704 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3705 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3706 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
3707 // LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3708 // LENUMLOADS-NEXT:    ret void
3709 //
3710 // BENUMLOADS-LABEL: @increment_v_c_st16(
3711 // BENUMLOADS-NEXT:  entry:
3712 // BENUMLOADS-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3713 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
3714 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3715 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3716 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
3717 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3718 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i64
3719 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3720 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
3721 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3722 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
3723 // BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3724 // BENUMLOADS-NEXT:    ret void
3725 //
3726 // LEWIDTH-LABEL: @increment_v_c_st16(
3727 // LEWIDTH-NEXT:  entry:
3728 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3729 // LEWIDTH-NEXT:    [[TMP1:%.*]] = bitcast i48* [[TMP0]] to i32*
3730 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3731 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3732 // LEWIDTH-NEXT:    store volatile i32 [[INC]], i32* [[TMP1]], align 4
3733 // LEWIDTH-NEXT:    ret void
3734 //
3735 // BEWIDTH-LABEL: @increment_v_c_st16(
3736 // BEWIDTH-NEXT:  entry:
3737 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3738 // BEWIDTH-NEXT:    [[TMP1:%.*]] = bitcast i48* [[TMP0]] to i32*
3739 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3740 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3741 // BEWIDTH-NEXT:    store volatile i32 [[INC]], i32* [[TMP1]], align 4
3742 // BEWIDTH-NEXT:    ret void
3743 //
3744 // LEWIDTHNUM-LABEL: @increment_v_c_st16(
3745 // LEWIDTHNUM-NEXT:  entry:
3746 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3747 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = bitcast i48* [[TMP0]] to i32*
3748 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3749 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3750 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3751 // LEWIDTHNUM-NEXT:    store volatile i32 [[INC]], i32* [[TMP1]], align 4
3752 // LEWIDTHNUM-NEXT:    ret void
3753 //
3754 // BEWIDTHNUM-LABEL: @increment_v_c_st16(
3755 // BEWIDTHNUM-NEXT:  entry:
3756 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3757 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = bitcast i48* [[TMP0]] to i32*
3758 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3759 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3760 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3761 // BEWIDTHNUM-NEXT:    store volatile i32 [[INC]], i32* [[TMP1]], align 4
3762 // BEWIDTHNUM-NEXT:    ret void
3763 //
// Post-increments the 'c' bit-field through a volatile pointer.
// Per the CHECK lines above: 'c' is a full 32-bit field in the second
// storage unit (struct member index 1); width-limited modes access it as a
// simple volatile i32 load/inc/store, default modes as a masked i64 pair.
void increment_v_c_st16(volatile struct st16 *s) {
  s->c++;
}
3767 
3768 // LE-LABEL: @increment_v_d_st16(
3769 // LE-NEXT:  entry:
3770 // LE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3771 // LE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3772 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3773 // LE-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3774 // LE-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3775 // LE-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3776 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3777 // LE-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3778 // LE-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3779 // LE-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3780 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3781 // LE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3782 // LE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3783 // LE-NEXT:    ret void
3784 //
3785 // BE-LABEL: @increment_v_d_st16(
3786 // BE-NEXT:  entry:
3787 // BE-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3788 // BE-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3789 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3790 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3791 // BE-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3792 // BE-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3793 // BE-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3794 // BE-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3795 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3796 // BE-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3797 // BE-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3798 // BE-NEXT:    ret void
3799 //
3800 // LENUMLOADS-LABEL: @increment_v_d_st16(
3801 // LENUMLOADS-NEXT:  entry:
3802 // LENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3803 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3804 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3805 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
3806 // LENUMLOADS-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
3807 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i32 [[TMP2]], 1
3808 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3809 // LENUMLOADS-NEXT:    [[TMP3:%.*]] = and i32 [[INC]], 65535
3810 // LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
3811 // LENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
3812 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3813 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
3814 // LENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3815 // LENUMLOADS-NEXT:    ret void
3816 //
3817 // BENUMLOADS-LABEL: @increment_v_d_st16(
3818 // BENUMLOADS-NEXT:  entry:
3819 // BENUMLOADS-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
3820 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
3821 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3822 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4
3823 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
3824 // BENUMLOADS-NEXT:    [[INC4:%.*]] = add i32 [[TMP1]], 65536
3825 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = and i32 [[INC4]], -65536
3826 // BENUMLOADS-NEXT:    [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
3827 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3828 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3829 // BENUMLOADS-NEXT:    store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
3830 // BENUMLOADS-NEXT:    ret void
3831 //
3832 // LEWIDTH-LABEL: @increment_v_d_st16(
3833 // LEWIDTH-NEXT:  entry:
3834 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3835 // LEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 3
3836 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3837 // LEWIDTH-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
3838 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3839 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3840 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
3841 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
3842 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3843 // LEWIDTH-NEXT:    ret void
3844 //
3845 // BEWIDTH-LABEL: @increment_v_d_st16(
3846 // BEWIDTH-NEXT:  entry:
3847 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3848 // BEWIDTH-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 3
3849 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3850 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3851 // BEWIDTH-NEXT:    [[TMP2:%.*]] = add i32 [[BF_LOAD]], 65536
3852 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP2]], -65536
3853 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
3854 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
3855 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3856 // BEWIDTH-NEXT:    ret void
3857 //
3858 // LEWIDTHNUM-LABEL: @increment_v_d_st16(
3859 // LEWIDTHNUM-NEXT:  entry:
3860 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3861 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 3
3862 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3863 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
3864 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3865 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3866 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
3867 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
3868 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3869 // LEWIDTHNUM-NEXT:    ret void
3870 //
3871 // BEWIDTHNUM-LABEL: @increment_v_d_st16(
3872 // BEWIDTHNUM-NEXT:  entry:
3873 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
3874 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 3
3875 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3876 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
3877 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = add i32 [[BF_LOAD]], 65536
3878 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP2]], -65536
3879 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
3880 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
3881 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
3882 // BEWIDTHNUM-NEXT:    ret void
3883 //
// Post-increments the 'd' bit-field through a volatile pointer.
// Per the CHECK lines above: 'd' is a 16-bit field; the width-limited modes
// address its containing i32 word directly (GEP index 3) with a read-modify-
// write masked by 65535/-65536, while default modes use the i64 unit.
void increment_v_d_st16(volatile struct st16 *s) {
  s->d++;
}
3887 // st17 has alignment = 1, the AAPCS defines nothing for the
3888 // accessing of b, but accessing c should use char
// Packed layout: 'b' fills bytes 0-3, 'c' is byte 4 (total size 5, align 1).
// See the comment above: the AAPCS defines nothing for accessing 'b' here,
// but 'c' should be accessed with a char-sized load/store.
struct st17 {
int b : 32;
char c : 8;
} __attribute__((packed));
3893 
3894 // LE-LABEL: @increment_v_b_st17(
3895 // LE-NEXT:  entry:
3896 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3897 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3898 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
3899 // LE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3900 // LE-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
3901 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3902 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
3903 // LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
3904 // LE-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
3905 // LE-NEXT:    ret void
3906 //
3907 // BE-LABEL: @increment_v_b_st17(
3908 // BE-NEXT:  entry:
3909 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3910 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3911 // BE-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
3912 // BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
3913 // BE-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3914 // BE-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
3915 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3916 // BE-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
3917 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
3918 // BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
3919 // BE-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
3920 // BE-NEXT:    ret void
3921 //
3922 // LENUMLOADS-LABEL: @increment_v_b_st17(
3923 // LENUMLOADS-NEXT:  entry:
3924 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3925 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3926 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
3927 // LENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3928 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
3929 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3930 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
3931 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
3932 // LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
3933 // LENUMLOADS-NEXT:    ret void
3934 //
3935 // BENUMLOADS-LABEL: @increment_v_b_st17(
3936 // BENUMLOADS-NEXT:  entry:
3937 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3938 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3939 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
3940 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
3941 // BENUMLOADS-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3942 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
3943 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3944 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
3945 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
3946 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
3947 // BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
3948 // BENUMLOADS-NEXT:    ret void
3949 //
3950 // LEWIDTH-LABEL: @increment_v_b_st17(
3951 // LEWIDTH-NEXT:  entry:
3952 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3953 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3954 // LEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
3955 // LEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3956 // LEWIDTH-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
3957 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3958 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
3959 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
3960 // LEWIDTH-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
3961 // LEWIDTH-NEXT:    ret void
3962 //
3963 // BEWIDTH-LABEL: @increment_v_b_st17(
3964 // BEWIDTH-NEXT:  entry:
3965 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3966 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3967 // BEWIDTH-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
3968 // BEWIDTH-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
3969 // BEWIDTH-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3970 // BEWIDTH-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
3971 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3972 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
3973 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
3974 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
3975 // BEWIDTH-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
3976 // BEWIDTH-NEXT:    ret void
3977 //
3978 // LEWIDTHNUM-LABEL: @increment_v_b_st17(
3979 // LEWIDTHNUM-NEXT:  entry:
3980 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3981 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3982 // LEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
3983 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3984 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = zext i32 [[INC]] to i40
3985 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3986 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
3987 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
3988 // LEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
3989 // LEWIDTHNUM-NEXT:    ret void
3990 //
3991 // BEWIDTHNUM-LABEL: @increment_v_b_st17(
3992 // BEWIDTHNUM-NEXT:  entry:
3993 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
3994 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
3995 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
3996 // BEWIDTHNUM-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
3997 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3998 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = zext i32 [[INC]] to i40
3999 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4000 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
4001 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
4002 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
4003 // BEWIDTHNUM-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
4004 // BEWIDTHNUM-NEXT:    ret void
4005 //
increment_v_b_st17(volatile struct st17 * s)4006 void increment_v_b_st17(volatile struct st17 *s) {
4007   s->b++;
4008 }
4009 
4010 // LE-LABEL: @increment_v_c_st17(
4011 // LE-NEXT:  entry:
4012 // LE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
4013 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4014 // LE-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 32
4015 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i8
4016 // LE-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
4017 // LE-NEXT:    [[TMP2:%.*]] = zext i8 [[INC]] to i40
4018 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4019 // LE-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 32
4020 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
4021 // LE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
4022 // LE-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
4023 // LE-NEXT:    ret void
4024 //
4025 // BE-LABEL: @increment_v_c_st17(
4026 // BE-NEXT:  entry:
4027 // BE-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
4028 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4029 // BE-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i8
4030 // BE-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
4031 // BE-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
4032 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4033 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
4034 // BE-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
4035 // BE-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
4036 // BE-NEXT:    ret void
4037 //
4038 // LENUMLOADS-LABEL: @increment_v_c_st17(
4039 // LENUMLOADS-NEXT:  entry:
4040 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
4041 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4042 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 32
4043 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i8
4044 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
4045 // LENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i8 [[INC]] to i40
4046 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4047 // LENUMLOADS-NEXT:    [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 32
4048 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
4049 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
4050 // LENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
4051 // LENUMLOADS-NEXT:    ret void
4052 //
4053 // BENUMLOADS-LABEL: @increment_v_c_st17(
4054 // BENUMLOADS-NEXT:  entry:
4055 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
4056 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4057 // BENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i8
4058 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_CAST]], 1
4059 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = zext i8 [[INC]] to i40
4060 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
4061 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
4062 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
4063 // BENUMLOADS-NEXT:    store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
4064 // BENUMLOADS-NEXT:    ret void
4065 //
4066 // LEWIDTH-LABEL: @increment_v_c_st17(
4067 // LEWIDTH-NEXT:  entry:
4068 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], %struct.st17* [[S:%.*]], i32 0, i32 0, i32 4
4069 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
4070 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4071 // LEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
4072 // LEWIDTH-NEXT:    ret void
4073 //
4074 // BEWIDTH-LABEL: @increment_v_c_st17(
4075 // BEWIDTH-NEXT:  entry:
4076 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], %struct.st17* [[S:%.*]], i32 0, i32 0, i32 4
4077 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
4078 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4079 // BEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
4080 // BEWIDTH-NEXT:    ret void
4081 //
4082 // LEWIDTHNUM-LABEL: @increment_v_c_st17(
4083 // LEWIDTHNUM-NEXT:  entry:
4084 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], %struct.st17* [[S:%.*]], i32 0, i32 0, i32 4
4085 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
4086 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4087 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
4088 // LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
4089 // LEWIDTHNUM-NEXT:    ret void
4090 //
4091 // BEWIDTHNUM-LABEL: @increment_v_c_st17(
4092 // BEWIDTHNUM-NEXT:  entry:
4093 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], %struct.st17* [[S:%.*]], i32 0, i32 0, i32 4
4094 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
4095 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4096 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
4097 // BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 1
4098 // BEWIDTHNUM-NEXT:    ret void
4099 //
increment_v_c_st17(volatile struct st17 * s)4100 void increment_v_c_st17(volatile struct st17 *s) {
4101   s->c++;
4102 }
4103 
// A zero bitfield should block, as the C11 specification
// requires a and b to be different memory positions
// The zero-width 'char : 0' splits 'a' and 'b' into separate storage
// units, so each is accessed as an independent i8 in the CHECK lines above.
struct zero_bitfield {
  int a : 8;
  char : 0;
  int b : 8;
};
4111 
4112 // LE-LABEL: @increment_a_zero_bitfield(
4113 // LE-NEXT:  entry:
4114 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4115 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4116 // LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4117 // LE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4118 // LE-NEXT:    ret void
4119 //
4120 // BE-LABEL: @increment_a_zero_bitfield(
4121 // BE-NEXT:  entry:
4122 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4123 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4124 // BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4125 // BE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4126 // BE-NEXT:    ret void
4127 //
4128 // LENUMLOADS-LABEL: @increment_a_zero_bitfield(
4129 // LENUMLOADS-NEXT:  entry:
4130 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4131 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4132 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4133 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4134 // LENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4135 // LENUMLOADS-NEXT:    ret void
4136 //
4137 // BENUMLOADS-LABEL: @increment_a_zero_bitfield(
4138 // BENUMLOADS-NEXT:  entry:
4139 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4140 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4141 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4142 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4143 // BENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4144 // BENUMLOADS-NEXT:    ret void
4145 //
4146 // LEWIDTH-LABEL: @increment_a_zero_bitfield(
4147 // LEWIDTH-NEXT:  entry:
4148 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4149 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4150 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4151 // LEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4152 // LEWIDTH-NEXT:    ret void
4153 //
4154 // BEWIDTH-LABEL: @increment_a_zero_bitfield(
4155 // BEWIDTH-NEXT:  entry:
4156 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4157 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4158 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4159 // BEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4160 // BEWIDTH-NEXT:    ret void
4161 //
4162 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
4163 // LEWIDTHNUM-NEXT:  entry:
4164 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4165 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4166 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4167 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4168 // LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4169 // LEWIDTHNUM-NEXT:    ret void
4170 //
4171 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
4172 // BEWIDTHNUM-NEXT:  entry:
4173 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 0
4174 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4175 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4176 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4
4177 // BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
4178 // BEWIDTHNUM-NEXT:    ret void
4179 //
increment_a_zero_bitfield(volatile struct zero_bitfield * s)4180 void increment_a_zero_bitfield(volatile struct zero_bitfield *s) {
4181   s->a++;
4182 }
4183 
4184 // LE-LABEL: @increment_b_zero_bitfield(
4185 // LE-NEXT:  entry:
4186 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4187 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4188 // LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4189 // LE-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4190 // LE-NEXT:    ret void
4191 //
4192 // BE-LABEL: @increment_b_zero_bitfield(
4193 // BE-NEXT:  entry:
4194 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4195 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4196 // BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4197 // BE-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4198 // BE-NEXT:    ret void
4199 //
4200 // LENUMLOADS-LABEL: @increment_b_zero_bitfield(
4201 // LENUMLOADS-NEXT:  entry:
4202 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4203 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4204 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4205 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[B]], align 1
4206 // LENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4207 // LENUMLOADS-NEXT:    ret void
4208 //
4209 // BENUMLOADS-LABEL: @increment_b_zero_bitfield(
4210 // BENUMLOADS-NEXT:  entry:
4211 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4212 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4213 // BENUMLOADS-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4214 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[B]], align 1
4215 // BENUMLOADS-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4216 // BENUMLOADS-NEXT:    ret void
4217 //
4218 // LEWIDTH-LABEL: @increment_b_zero_bitfield(
4219 // LEWIDTH-NEXT:  entry:
4220 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4221 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4222 // LEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4223 // LEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4224 // LEWIDTH-NEXT:    ret void
4225 //
4226 // BEWIDTH-LABEL: @increment_b_zero_bitfield(
4227 // BEWIDTH-NEXT:  entry:
4228 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4229 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4230 // BEWIDTH-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4231 // BEWIDTH-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4232 // BEWIDTH-NEXT:    ret void
4233 //
4234 // LEWIDTHNUM-LABEL: @increment_b_zero_bitfield(
4235 // LEWIDTHNUM-NEXT:  entry:
4236 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4237 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4238 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4239 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[B]], align 1
4240 // LEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4241 // LEWIDTHNUM-NEXT:    ret void
4242 //
4243 // BEWIDTHNUM-LABEL: @increment_b_zero_bitfield(
4244 // BEWIDTHNUM-NEXT:  entry:
4245 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], %struct.zero_bitfield* [[S:%.*]], i32 0, i32 1
4246 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
4247 // BEWIDTHNUM-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4248 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[B]], align 1
4249 // BEWIDTHNUM-NEXT:    store volatile i8 [[INC]], i8* [[B]], align 1
4250 // BEWIDTHNUM-NEXT:    ret void
4251 //
increment_b_zero_bitfield(volatile struct zero_bitfield * s)4252 void increment_b_zero_bitfield(volatile struct zero_bitfield *s) {
4253   s->b++;
4254 }
4255 
// The zero bitfield here does not affect
// 'a' and 'a1' share one 16-bit storage unit; the 'long : 0' pushes 'b'
// into a separate unit, which the CHECK lines above access as i32.
struct zero_bitfield_ok {
  short a : 8;
  char a1 : 8;
  long : 0;
  int b : 24;
};
4263 
4264 // LE-LABEL: @increment_a_zero_bitfield_ok(
4265 // LE-NEXT:  entry:
4266 // LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4267 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4268 // LE-NEXT:    [[CONV:%.*]] = trunc i16 [[BF_LOAD]] to i8
4269 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4270 // LE-NEXT:    [[TMP1:%.*]] = lshr i16 [[BF_LOAD1]], 8
4271 // LE-NEXT:    [[BF_CAST:%.*]] = trunc i16 [[TMP1]] to i8
4272 // LE-NEXT:    [[ADD:%.*]] = add i8 [[BF_CAST]], [[CONV]]
4273 // LE-NEXT:    [[TMP2:%.*]] = zext i8 [[ADD]] to i16
4274 // LE-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4275 // LE-NEXT:    [[BF_SHL6:%.*]] = shl nuw i16 [[TMP2]], 8
4276 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
4277 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_SHL6]], [[BF_CLEAR]]
4278 // LE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
4279 // LE-NEXT:    ret void
4280 //
4281 // BE-LABEL: @increment_a_zero_bitfield_ok(
4282 // BE-NEXT:  entry:
4283 // BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4284 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4285 // BE-NEXT:    [[TMP1:%.*]] = lshr i16 [[BF_LOAD]], 8
4286 // BE-NEXT:    [[CONV:%.*]] = trunc i16 [[TMP1]] to i8
4287 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4288 // BE-NEXT:    [[SEXT:%.*]] = trunc i16 [[BF_LOAD1]] to i8
4289 // BE-NEXT:    [[ADD:%.*]] = add i8 [[SEXT]], [[CONV]]
4290 // BE-NEXT:    [[TMP2:%.*]] = zext i8 [[ADD]] to i16
4291 // BE-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4292 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
4293 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[TMP2]]
4294 // BE-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
4295 // BE-NEXT:    ret void
4296 //
4297 // LENUMLOADS-LABEL: @increment_a_zero_bitfield_ok(
4298 // LENUMLOADS-NEXT:  entry:
4299 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4300 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4301 // LENUMLOADS-NEXT:    [[CONV:%.*]] = trunc i16 [[BF_LOAD]] to i8
4302 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4303 // LENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i16 [[BF_LOAD1]], 8
4304 // LENUMLOADS-NEXT:    [[BF_CAST:%.*]] = trunc i16 [[TMP1]] to i8
4305 // LENUMLOADS-NEXT:    [[ADD:%.*]] = add i8 [[BF_CAST]], [[CONV]]
4306 // LENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i8 [[ADD]] to i16
4307 // LENUMLOADS-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4308 // LENUMLOADS-NEXT:    [[BF_SHL6:%.*]] = shl nuw i16 [[TMP2]], 8
4309 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
4310 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_SHL6]], [[BF_CLEAR]]
4311 // LENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
4312 // LENUMLOADS-NEXT:    ret void
4313 //
4314 // BENUMLOADS-LABEL: @increment_a_zero_bitfield_ok(
4315 // BENUMLOADS-NEXT:  entry:
4316 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4317 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4318 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = lshr i16 [[BF_LOAD]], 8
4319 // BENUMLOADS-NEXT:    [[CONV:%.*]] = trunc i16 [[TMP1]] to i8
4320 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4321 // BENUMLOADS-NEXT:    [[SEXT:%.*]] = trunc i16 [[BF_LOAD1]] to i8
4322 // BENUMLOADS-NEXT:    [[ADD:%.*]] = add i8 [[SEXT]], [[CONV]]
4323 // BENUMLOADS-NEXT:    [[TMP2:%.*]] = zext i8 [[ADD]] to i16
4324 // BENUMLOADS-NEXT:    [[BF_LOAD5:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4325 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
4326 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[TMP2]]
4327 // BENUMLOADS-NEXT:    store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
4328 // BENUMLOADS-NEXT:    ret void
4329 //
4330 // LEWIDTH-LABEL: @increment_a_zero_bitfield_ok(
4331 // LEWIDTH-NEXT:  entry:
4332 // LEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4333 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4334 // LEWIDTH-NEXT:    [[CONV:%.*]] = trunc i16 [[BF_LOAD]] to i8
4335 // LEWIDTH-NEXT:    [[TMP1:%.*]] = bitcast %struct.zero_bitfield_ok* [[S]] to i8*
4336 // LEWIDTH-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i32 1
4337 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP2]], align 1
4338 // LEWIDTH-NEXT:    [[ADD:%.*]] = add i8 [[BF_LOAD1]], [[CONV]]
4339 // LEWIDTH-NEXT:    store volatile i8 [[ADD]], i8* [[TMP2]], align 1
4340 // LEWIDTH-NEXT:    ret void
4341 //
4342 // BEWIDTH-LABEL: @increment_a_zero_bitfield_ok(
4343 // BEWIDTH-NEXT:  entry:
4344 // BEWIDTH-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4345 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4346 // BEWIDTH-NEXT:    [[TMP1:%.*]] = lshr i16 [[BF_LOAD]], 8
4347 // BEWIDTH-NEXT:    [[CONV:%.*]] = trunc i16 [[TMP1]] to i8
4348 // BEWIDTH-NEXT:    [[TMP2:%.*]] = bitcast %struct.zero_bitfield_ok* [[S]] to i8*
4349 // BEWIDTH-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[TMP2]], i32 1
4350 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP3]], align 1
4351 // BEWIDTH-NEXT:    [[ADD:%.*]] = add i8 [[BF_LOAD1]], [[CONV]]
4352 // BEWIDTH-NEXT:    store volatile i8 [[ADD]], i8* [[TMP3]], align 1
4353 // BEWIDTH-NEXT:    ret void
4354 //
4355 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
4356 // LEWIDTHNUM-NEXT:  entry:
4357 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4358 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4359 // LEWIDTHNUM-NEXT:    [[CONV:%.*]] = trunc i16 [[BF_LOAD]] to i8
4360 // LEWIDTHNUM-NEXT:    [[TMP1:%.*]] = bitcast %struct.zero_bitfield_ok* [[S]] to i8*
4361 // LEWIDTHNUM-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i32 1
4362 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP2]], align 1
4363 // LEWIDTHNUM-NEXT:    [[ADD:%.*]] = add i8 [[BF_LOAD1]], [[CONV]]
4364 // LEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, i8* [[TMP2]], align 1
4365 // LEWIDTHNUM-NEXT:    store volatile i8 [[ADD]], i8* [[TMP2]], align 1
4366 // LEWIDTHNUM-NEXT:    ret void
4367 //
4368 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
4369 // BEWIDTHNUM-NEXT:  entry:
4370 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 0
4371 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
4372 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = lshr i16 [[BF_LOAD]], 8
4373 // BEWIDTHNUM-NEXT:    [[CONV:%.*]] = trunc i16 [[TMP1]] to i8
4374 // BEWIDTHNUM-NEXT:    [[TMP2:%.*]] = bitcast %struct.zero_bitfield_ok* [[S]] to i8*
4375 // BEWIDTHNUM-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[TMP2]], i32 1
4376 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP3]], align 1
4377 // BEWIDTHNUM-NEXT:    [[ADD:%.*]] = add i8 [[BF_LOAD1]], [[CONV]]
4378 // BEWIDTHNUM-NEXT:    [[BF_LOAD4:%.*]] = load volatile i8, i8* [[TMP3]], align 1
4379 // BEWIDTHNUM-NEXT:    store volatile i8 [[ADD]], i8* [[TMP3]], align 1
4380 // BEWIDTHNUM-NEXT:    ret void
4381 //
increment_a_zero_bitfield_ok(volatile struct zero_bitfield_ok * s)4382 void increment_a_zero_bitfield_ok(volatile struct zero_bitfield_ok *s) {
4383   s->a1 += s->a;
4384 }
4385 
4386 // LE-LABEL: @increment_b_zero_bitfield_ok(
4387 // LE-NEXT:  entry:
4388 // LE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4389 // LE-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4390 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4391 // LE-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
4392 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4393 // LE-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4394 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4395 // LE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4396 // LE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4397 // LE-NEXT:    ret void
4398 //
4399 // BE-LABEL: @increment_b_zero_bitfield_ok(
4400 // BE-NEXT:  entry:
4401 // BE-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4402 // BE-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4403 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4404 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4405 // BE-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 256
4406 // BE-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -256
4407 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4408 // BE-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4409 // BE-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4410 // BE-NEXT:    ret void
4411 //
4412 // LENUMLOADS-LABEL: @increment_b_zero_bitfield_ok(
4413 // LENUMLOADS-NEXT:  entry:
4414 // LENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4415 // LENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4416 // LENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4417 // LENUMLOADS-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
4418 // LENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4419 // LENUMLOADS-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4420 // LENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4421 // LENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4422 // LENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4423 // LENUMLOADS-NEXT:    ret void
4424 //
4425 // BENUMLOADS-LABEL: @increment_b_zero_bitfield_ok(
4426 // BENUMLOADS-NEXT:  entry:
4427 // BENUMLOADS-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4428 // BENUMLOADS-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4429 // BENUMLOADS-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4430 // BENUMLOADS-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4431 // BENUMLOADS-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 256
4432 // BENUMLOADS-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -256
4433 // BENUMLOADS-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4434 // BENUMLOADS-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4435 // BENUMLOADS-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4436 // BENUMLOADS-NEXT:    ret void
4437 //
4438 // LEWIDTH-LABEL: @increment_b_zero_bitfield_ok(
4439 // LEWIDTH-NEXT:  entry:
4440 // LEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4441 // LEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4442 // LEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4443 // LEWIDTH-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
4444 // LEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4445 // LEWIDTH-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4446 // LEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4447 // LEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4448 // LEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4449 // LEWIDTH-NEXT:    ret void
4450 //
4451 // BEWIDTH-LABEL: @increment_b_zero_bitfield_ok(
4452 // BEWIDTH-NEXT:  entry:
4453 // BEWIDTH-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4454 // BEWIDTH-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4455 // BEWIDTH-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4456 // BEWIDTH-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4457 // BEWIDTH-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 256
4458 // BEWIDTH-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -256
4459 // BEWIDTH-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4460 // BEWIDTH-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4461 // BEWIDTH-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4462 // BEWIDTH-NEXT:    ret void
4463 //
4464 // LEWIDTHNUM-LABEL: @increment_b_zero_bitfield_ok(
4465 // LEWIDTHNUM-NEXT:  entry:
4466 // LEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4467 // LEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4468 // LEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4469 // LEWIDTHNUM-NEXT:    [[INC:%.*]] = add i32 [[BF_LOAD]], 1
4470 // LEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4471 // LEWIDTHNUM-NEXT:    [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4472 // LEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4473 // LEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4474 // LEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4475 // LEWIDTHNUM-NEXT:    ret void
4476 //
4477 // BEWIDTHNUM-LABEL: @increment_b_zero_bitfield_ok(
4478 // BEWIDTHNUM-NEXT:  entry:
4479 // BEWIDTHNUM-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], %struct.zero_bitfield_ok* [[S:%.*]], i32 0, i32 1
4480 // BEWIDTHNUM-NEXT:    [[TMP0:%.*]] = bitcast i24* [[B]] to i32*
4481 // BEWIDTHNUM-NEXT:    [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4482 // BEWIDTHNUM-NEXT:    [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
4483 // BEWIDTHNUM-NEXT:    [[TMP1:%.*]] = add i32 [[BF_LOAD]], 256
4484 // BEWIDTHNUM-NEXT:    [[BF_SHL:%.*]] = and i32 [[TMP1]], -256
4485 // BEWIDTHNUM-NEXT:    [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4486 // BEWIDTHNUM-NEXT:    [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4487 // BEWIDTHNUM-NEXT:    store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
4488 // BEWIDTHNUM-NEXT:    ret void
4489 //
// Post-increments the volatile bitfield member 'b' of struct zero_bitfield_ok
// (declared earlier in this file; layout not visible here) to exercise AAPCS
// volatile-bitfield access lowering. Per the CHECK lines above, 'b' occupies a
// 24-bit storage unit that is accessed as a volatile i32 container; the
// expected number and order of volatile loads/stores differs per target
// endianness and per -faapcs-bitfield-load / -fno-aapcs-bitfield-width flag
// combination, which is exactly what the six check prefixes pin down.
// NOTE: do not rewrite this expression (e.g. as ++s->b or s->b = s->b + 1) —
// the autogenerated assertions depend on the precise IR this form emits.
void increment_b_zero_bitfield_ok(volatile struct zero_bitfield_ok *s) {
  s->b++;
}