//! Helpers for code generation that need struct layout

use super::helpers;

use crate::ir::comp::CompInfo;
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
use crate::ir::ty::{Type, TypeKind};
use crate::FieldVisibilityKind;
use proc_macro2::{self, Ident, Span};
use std::cmp;

const MAX_GUARANTEED_ALIGN: usize = 8;

/// Traces the layout of a struct.
#[derive(Debug)]
pub(crate) struct StructLayoutTracker<'a> {
    name: &'a str,
    ctx: &'a BindgenContext,
    comp: &'a CompInfo,
    is_packed: bool,
    known_type_layout: Option<Layout>,
    is_rust_union: bool,
    can_copy_union_fields: bool,
    latest_offset: usize,
    padding_count: usize,
    latest_field_layout: Option<Layout>,
    max_field_align: usize,
    last_field_was_bitfield: bool,
    visibility: FieldVisibilityKind,
}
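
// Illustrative note (not from the original source): codegen is expected to
// drive a tracker roughly in declaration order, along these lines:
//
//     let mut tracker =
//         StructLayoutTracker::new(ctx, comp, ty, name, visibility);
//     tracker.saw_vtable();                        // if the type has a vtable
//     tracker.saw_base(base_ty);                   // for each base
//     let pad = tracker.saw_field(name, ty, off);  // optional padding field
//     let tail = tracker.add_tail_padding(name, layout);
//     let extra = tracker.pad_struct(layout);
//
// The method names are the ones defined below; the exact call sequence is an
// assumption based on how each method updates `latest_offset`.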

/// Returns `size` rounded up to a multiple of `align` (or `size` unchanged
/// when `align` is zero).
pub(crate) fn align_to(size: usize, align: usize) -> usize {
    if align == 0 {
        return size;
    }

    let rem = size % align;
    if rem == 0 {
        return size;
    }

    size + align - rem
}

/// Returns the smallest power-of-two number of bytes that can hold `n` bits.
pub(crate) fn bytes_from_bits_pow2(mut n: usize) -> usize {
    if n == 0 {
        return 0;
    }

    if n <= 8 {
        return 1;
    }

    if !n.is_power_of_two() {
        n = n.next_power_of_two();
    }

    n / 8
}

#[test]
fn test_align_to() {
    assert_eq!(align_to(1, 1), 1);
    assert_eq!(align_to(1, 2), 2);
    assert_eq!(align_to(1, 4), 4);
    assert_eq!(align_to(5, 1), 5);
    assert_eq!(align_to(17, 4), 20);
}

#[test]
fn test_bytes_from_bits_pow2() {
    assert_eq!(bytes_from_bits_pow2(0), 0);
    for i in 1..9 {
        assert_eq!(bytes_from_bits_pow2(i), 1);
    }
    for i in 9..17 {
        assert_eq!(bytes_from_bits_pow2(i), 2);
    }
    for i in 17..33 {
        assert_eq!(bytes_from_bits_pow2(i), 4);
    }
}
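
// Supplementary edge-case checks (not from the original module): `align_to`
// treats a zero alignment as "no alignment requirement", and
// `bytes_from_bits_pow2` keeps rounding up to the next power of two past 32
// bits.
#[test]
fn test_layout_helper_edge_cases() {
    assert_eq!(align_to(17, 0), 17);
    assert_eq!(align_to(0, 8), 0);
    assert_eq!(bytes_from_bits_pow2(33), 8);
    assert_eq!(bytes_from_bits_pow2(64), 8);
}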

impl<'a> StructLayoutTracker<'a> {
    pub(crate) fn new(
        ctx: &'a BindgenContext,
        comp: &'a CompInfo,
        ty: &'a Type,
        name: &'a str,
        visibility: FieldVisibilityKind,
    ) -> Self {
        let known_type_layout = ty.layout(ctx);
        let is_packed = comp.is_packed(ctx, known_type_layout.as_ref());
        let (is_rust_union, can_copy_union_fields) =
            comp.is_rust_union(ctx, known_type_layout.as_ref(), name);
        StructLayoutTracker {
            name,
            ctx,
            comp,
            visibility,
            is_packed,
            known_type_layout,
            is_rust_union,
            can_copy_union_fields,
            latest_offset: 0,
            padding_count: 0,
            latest_field_layout: None,
            max_field_align: 0,
            last_field_was_bitfield: false,
        }
    }

    pub(crate) fn can_copy_union_fields(&self) -> bool {
        self.can_copy_union_fields
    }

    pub(crate) fn is_rust_union(&self) -> bool {
        self.is_rust_union
    }

    pub(crate) fn saw_vtable(&mut self) {
        debug!("saw vtable for {}", self.name);

        let ptr_size = self.ctx.target_pointer_size();
        self.latest_offset += ptr_size;
        self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size));
        self.max_field_align = ptr_size;
    }

    pub(crate) fn saw_base(&mut self, base_ty: &Type) {
        debug!("saw base for {}", self.name);
        if let Some(layout) = base_ty.layout(self.ctx) {
            self.align_to_latest_field(layout);

            self.latest_offset += self.padding_bytes(layout) + layout.size;
            self.latest_field_layout = Some(layout);
            self.max_field_align = cmp::max(self.max_field_align, layout.align);
        }
    }

    pub(crate) fn saw_bitfield_unit(&mut self, layout: Layout) {
        debug!("saw bitfield unit for {}: {:?}", self.name, layout);

        self.align_to_latest_field(layout);

        self.latest_offset += layout.size;

        debug!(
            "Offset: <bitfield>: {} -> {}",
            self.latest_offset - layout.size,
            self.latest_offset
        );

        self.latest_field_layout = Some(layout);
        self.last_field_was_bitfield = true;
        // NB: We intentionally don't update max_field_align here, since our
        // bitfield codegen doesn't necessarily guarantee that alignment, and
        // we need to actually generate the dummy alignment.
    }

    /// Returns a padding field if necessary for a given new field _before_
    /// adding that field.
    pub(crate) fn saw_field(
        &mut self,
        field_name: &str,
        field_ty: &Type,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let mut field_layout = field_ty.layout(self.ctx)?;

        if let TypeKind::Array(inner, len) =
            *field_ty.canonical_type(self.ctx).kind()
        {
            // FIXME(emilio): As an _ultra_ hack, we correct the layout
            // returned for arrays of structs that have a bigger alignment
            // than what we can support.
            //
            // This means that the structs in the array are super-unsafe to
            // access, since they won't be properly aligned, but there's not
            // too much we can do about it.
            if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx)
            {
                if layout.align > MAX_GUARANTEED_ALIGN {
                    field_layout.size =
                        align_to(layout.size, layout.align) * len;
                    field_layout.align = MAX_GUARANTEED_ALIGN;
                }
            }
        }
        self.saw_field_with_layout(field_name, field_layout, field_offset)
    }

    pub(crate) fn saw_field_with_layout(
        &mut self,
        field_name: &str,
        field_layout: Layout,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let will_merge_with_bitfield = self.align_to_latest_field(field_layout);

        let is_union = self.comp.is_union();
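        // Worked example (illustrative, not an upstream comment): if the
        // field sits at bit offset 96 (byte 12) and `latest_offset` is
        // currently 8, the first match arm below yields 96 / 8 - 8 == 4
        // bytes of padding before the field.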
        let padding_bytes = match field_offset {
            Some(offset) if offset / 8 > self.latest_offset => {
                offset / 8 - self.latest_offset
            }
            _ => {
                if will_merge_with_bitfield ||
                    field_layout.align == 0 ||
                    is_union
                {
                    0
                } else if !self.is_packed {
                    self.padding_bytes(field_layout)
                } else if let Some(l) = self.known_type_layout {
                    self.padding_bytes(l)
                } else {
                    0
                }
            }
        };

        self.latest_offset += padding_bytes;

        let padding_layout = if self.is_packed || is_union {
            None
        } else {
            let force_padding = self.ctx.options().force_explicit_padding;

            // Otherwise the padding is useless.
            let need_padding = force_padding ||
                padding_bytes >= field_layout.align ||
                field_layout.align > MAX_GUARANTEED_ALIGN;

            debug!(
                "Offset: <padding>: {} -> {}",
                self.latest_offset - padding_bytes,
                self.latest_offset
            );

            debug!(
                "align field {} to {}/{} with {} padding bytes {:?}",
                field_name,
                self.latest_offset,
                field_offset.unwrap_or(0) / 8,
                padding_bytes,
                field_layout
            );

            let padding_align = if force_padding {
                1
            } else {
                cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN)
            };

            if need_padding && padding_bytes != 0 {
                Some(Layout::new(padding_bytes, padding_align))
            } else {
                None
            }
        };

        self.latest_offset += field_layout.size;
        self.latest_field_layout = Some(field_layout);
        self.max_field_align =
            cmp::max(self.max_field_align, field_layout.align);
        self.last_field_was_bitfield = false;

        debug!(
            "Offset: {}: {} -> {}",
            field_name,
            self.latest_offset - field_layout.size,
            self.latest_offset
        );

        padding_layout.map(|layout| self.padding_field(layout))
    }

    pub(crate) fn add_tail_padding(
        &mut self,
        comp_name: &str,
        comp_layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        // Only emit a padding field at the end of a struct if the user
        // configured explicit padding.
        if !self.ctx.options().force_explicit_padding {
            return None;
        }

        // Padding doesn't make sense for rust unions.
        if self.is_rust_union {
            return None;
        }

        if self.latest_offset == comp_layout.size {
            // This struct does not contain tail padding.
            return None;
        }
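
        // Illustrative example (an assumption, not an upstream comment): for
        // a struct whose declared size is 16 but whose last field ends at
        // offset 12, this emits a 4-byte (16 - 12) padding blob so the
        // generated type matches the declared size.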
        trace!(
            "need a tail padding field for {}: offset {} -> size {}",
            comp_name,
            self.latest_offset,
            comp_layout.size
        );
        let size = comp_layout.size - self.latest_offset;
        Some(self.padding_field(Layout::new(size, 0)))
    }

    pub(crate) fn pad_struct(
        &mut self,
        layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        debug!(
            "pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}",
            self, layout
        );

        if layout.size < self.latest_offset {
            warn!(
323 "Calculated wrong layout for {}, too more {} bytes",
                self.name,
                self.latest_offset - layout.size
            );
            return None;
        }

        let padding_bytes = layout.size - self.latest_offset;
        if padding_bytes == 0 {
            return None;
        }

        let repr_align = self.ctx.options().rust_features().repr_align;

        // We always pad to get to the correct size if the struct is one of
        // those we can't align properly.
        //
        // Note that if the last field we saw was a bitfield, we may need to
        // pad regardless, because bitfields don't respect alignment as
        // strictly as other fields.
        if padding_bytes >= layout.align ||
            (self.last_field_was_bitfield &&
                padding_bytes >= self.latest_field_layout.unwrap().align) ||
            (!repr_align && layout.align > MAX_GUARANTEED_ALIGN)
        {
            let layout = if self.is_packed {
                Layout::new(padding_bytes, 1)
            } else if self.last_field_was_bitfield ||
                layout.align > MAX_GUARANTEED_ALIGN
            {
                // We've already given up on alignment here.
                Layout::for_size(self.ctx, padding_bytes)
            } else {
                Layout::new(padding_bytes, layout.align)
            };

            debug!("pad bytes to struct {}, {:?}", self.name, layout);

            Some(self.padding_field(layout))
        } else {
            None
        }
    }

    pub(crate) fn requires_explicit_align(&self, layout: Layout) -> bool {
        let repr_align = self.ctx.options().rust_features().repr_align;

        // Always force explicit repr(align) for stuff more than 16-byte
        // aligned, to work around
        // https://github.com/rust-lang/rust/issues/54341.
        //
        // Worst-case this just generates redundant alignment attributes.
        if repr_align && self.max_field_align >= 16 {
            return true;
        }

        if self.max_field_align >= layout.align {
            return false;
        }

        // We can only generate up to 8 bytes of alignment unless we support
        // repr(align).
        repr_align || layout.align <= MAX_GUARANTEED_ALIGN
    }

    fn padding_bytes(&self, layout: Layout) -> usize {
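        // E.g. (illustrative): with `latest_offset == 5` and `layout.align ==
        // 4`, align_to(5, 4) == 8, so 3 bytes of padding are needed.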
        align_to(self.latest_offset, layout.align) - self.latest_offset
    }

    fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream {
        let ty = helpers::blob(self.ctx, layout);
        let padding_count = self.padding_count;

        self.padding_count += 1;

        let padding_field_name = Ident::new(
            &format!("__bindgen_padding_{}", padding_count),
            Span::call_site(),
        );

        self.max_field_align = cmp::max(self.max_field_align, layout.align);

        let vis = super::access_specifier(self.visibility);

        quote! {
            #vis #padding_field_name : #ty ,
        }
    }

    /// Returns whether the new field is known to merge with a bitfield.
    ///
    /// This is just to avoid doing the same check also in pad_field.
    fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
        if self.is_packed {
            // Skip aligning fields when the struct is packed.
            return false;
        }

        let layout = match self.latest_field_layout {
            Some(l) => l,
            None => return false,
        };

        // If the last field was a bitfield, we may or may not need to align,
        // depending on what the current field alignment and the bitfield size
        // and alignment are.
        debug!(
            "align_to_bitfield? {}: {:?} {:?}",
            self.last_field_was_bitfield, layout, new_field_layout
        );

        // Avoid divide-by-zero errors if align is 0.
        let align = cmp::max(1, layout.align);

        if self.last_field_was_bitfield &&
            new_field_layout.align <= layout.size % align &&
            new_field_layout.size <= layout.size % align
        {
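            // Illustrative arithmetic (an assumption, not an upstream
            // comment): if the previous bitfield unit had size 3 and align 4,
            // `layout.size % align` is 3, so a following field whose size and
            // align are both at most 3 (e.g. a `u8`) is placed in those
            // remaining bytes instead of after alignment padding.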
            // The new field will be coalesced into some of the remaining bits.
            //
            // FIXME(emilio): I think this may not catch everything?
            debug!("Will merge with bitfield");
            return true;
        }

        // Else, just align the obvious way.
        self.latest_offset += self.padding_bytes(layout);
        false
    }
}