// NOTE(review): stripped HTML viewer navigation artifacts ("Home", "Line#",
// "Scopes#", "Navigate", "Raw", "Download") — they are not part of the source.
//! Helpers for code generation that need struct layout.
2 
3 use super::helpers;
4 
5 use crate::ir::comp::CompInfo;
6 use crate::ir::context::BindgenContext;
7 use crate::ir::layout::Layout;
8 use crate::ir::ty::{Type, TypeKind};
9 use proc_macro2::{self, Ident, Span};
10 use std::cmp;
11 
/// The largest alignment the generated code can guarantee for a field without
/// `repr(align)` support (see `requires_explicit_align` below); over-aligned
/// layouts get clamped down to this.
const MAX_GUARANTEED_ALIGN: usize = 8;
13 
/// Trace the layout of struct.
#[derive(Debug)]
pub struct StructLayoutTracker<'a> {
    /// Name of the type being laid out; used only in log messages.
    name: &'a str,
    /// Bindgen context, used to resolve types and target pointer size.
    ctx: &'a BindgenContext,
    /// The composite (struct/union) whose layout is being traced.
    comp: &'a CompInfo,
    /// Whether the composite is packed.
    is_packed: bool,
    /// The layout of the type itself, if known.
    known_type_layout: Option<Layout>,
    /// Whether this union can be emitted as a native Rust `union`.
    is_rust_union: bool,
    /// Running byte offset after all fields/bases/vtables seen so far.
    latest_offset: usize,
    /// Number of padding fields generated so far; used to name the next one.
    padding_count: usize,
    /// Layout of the last field (or base / vtable pointer) seen, if any.
    latest_field_layout: Option<Layout>,
    /// Largest field alignment seen so far.
    max_field_align: usize,
    /// Whether the last field seen was a bitfield storage unit.
    last_field_was_bitfield: bool,
}
29 
/// Rounds `size` up to the next multiple of `align`.
///
/// An `align` of zero is treated as "no alignment required", returning `size`
/// unchanged.
pub fn align_to(size: usize, align: usize) -> usize {
    if align == 0 {
        return size;
    }

    match size % align {
        0 => size,
        rem => size + align - rem,
    }
}
43 
/// Returns the smallest power-of-two byte count that can hold at least `n`
/// bits (zero bits need zero bytes; anything up to a byte needs one).
pub fn bytes_from_bits_pow2(n: usize) -> usize {
    match n {
        0 => 0,
        1..=8 => 1,
        // `next_power_of_two` is the identity on powers of two, so no
        // separate `is_power_of_two` check is needed.
        _ => n.next_power_of_two() / 8,
    }
}
60 
#[test]
fn test_align_to() {
    // (size, align, expected)
    let cases = [(1, 1, 1), (1, 2, 2), (1, 4, 4), (5, 1, 5), (17, 4, 20)];
    for &(size, align, expected) in &cases {
        assert_eq!(align_to(size, align), expected);
    }
}
69 
#[test]
fn test_bytes_from_bits_pow2() {
    // Every bit count in `lo..hi` should need `expected` bytes.
    let check_range = |lo: usize, hi: usize, expected: usize| {
        for i in lo..hi {
            assert_eq!(bytes_from_bits_pow2(i), expected);
        }
    };

    assert_eq!(bytes_from_bits_pow2(0), 0);
    check_range(1, 9, 1);
    check_range(9, 17, 2);
    check_range(17, 33, 4);
}
83 
84 impl<'a> StructLayoutTracker<'a> {
new( ctx: &'a BindgenContext, comp: &'a CompInfo, ty: &'a Type, name: &'a str, ) -> Self85     pub fn new(
86         ctx: &'a BindgenContext,
87         comp: &'a CompInfo,
88         ty: &'a Type,
89         name: &'a str,
90     ) -> Self {
91         let known_type_layout = ty.layout(ctx);
92         let is_packed = comp.is_packed(ctx, known_type_layout.as_ref());
93         let is_rust_union = comp.is_union() &&
94             comp.can_be_rust_union(ctx, known_type_layout.as_ref());
95         StructLayoutTracker {
96             name,
97             ctx,
98             comp,
99             is_packed,
100             known_type_layout,
101             is_rust_union,
102             latest_offset: 0,
103             padding_count: 0,
104             latest_field_layout: None,
105             max_field_align: 0,
106             last_field_was_bitfield: false,
107         }
108     }
109 
    /// Returns whether this composite will be generated as a native Rust
    /// `union` (rather than a struct emulating one).
    pub fn is_rust_union(&self) -> bool {
        self.is_rust_union
    }
113 
saw_vtable(&mut self)114     pub fn saw_vtable(&mut self) {
115         debug!("saw vtable for {}", self.name);
116 
117         let ptr_size = self.ctx.target_pointer_size();
118         self.latest_offset += ptr_size;
119         self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size));
120         self.max_field_align = ptr_size;
121     }
122 
saw_base(&mut self, base_ty: &Type)123     pub fn saw_base(&mut self, base_ty: &Type) {
124         debug!("saw base for {}", self.name);
125         if let Some(layout) = base_ty.layout(self.ctx) {
126             self.align_to_latest_field(layout);
127 
128             self.latest_offset += self.padding_bytes(layout) + layout.size;
129             self.latest_field_layout = Some(layout);
130             self.max_field_align = cmp::max(self.max_field_align, layout.align);
131         }
132     }
133 
saw_bitfield_unit(&mut self, layout: Layout)134     pub fn saw_bitfield_unit(&mut self, layout: Layout) {
135         debug!("saw bitfield unit for {}: {:?}", self.name, layout);
136 
137         self.align_to_latest_field(layout);
138 
139         self.latest_offset += layout.size;
140 
141         debug!(
142             "Offset: <bitfield>: {} -> {}",
143             self.latest_offset - layout.size,
144             self.latest_offset
145         );
146 
147         self.latest_field_layout = Some(layout);
148         self.last_field_was_bitfield = true;
149         // NB: We intentionally don't update the max_field_align here, since our
150         // bitfields code doesn't necessarily guarantee it, so we need to
151         // actually generate the dummy alignment.
152     }
153 
154     /// Returns a padding field if necessary for a given new field _before_
155     /// adding that field.
saw_field( &mut self, field_name: &str, field_ty: &Type, field_offset: Option<usize>, ) -> Option<proc_macro2::TokenStream>156     pub fn saw_field(
157         &mut self,
158         field_name: &str,
159         field_ty: &Type,
160         field_offset: Option<usize>,
161     ) -> Option<proc_macro2::TokenStream> {
162         let mut field_layout = field_ty.layout(self.ctx)?;
163 
164         if let TypeKind::Array(inner, len) =
165             *field_ty.canonical_type(self.ctx).kind()
166         {
167             // FIXME(emilio): As an _ultra_ hack, we correct the layout returned
168             // by arrays of structs that have a bigger alignment than what we
169             // can support.
170             //
171             // This means that the structs in the array are super-unsafe to
172             // access, since they won't be properly aligned, but there's not too
173             // much we can do about it.
174             if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx)
175             {
176                 if layout.align > MAX_GUARANTEED_ALIGN {
177                     field_layout.size =
178                         align_to(layout.size, layout.align) * len;
179                     field_layout.align = MAX_GUARANTEED_ALIGN;
180                 }
181             }
182         }
183         self.saw_field_with_layout(field_name, field_layout, field_offset)
184     }
185 
    /// Books a field with an already-known layout into the running offset,
    /// returning a padding field to emit _before_ it if one is needed.
    ///
    /// `field_offset`, when known, is the field's offset in *bits* from the
    /// start of the struct (note the `/ 8` conversions below).
    pub fn saw_field_with_layout(
        &mut self,
        field_name: &str,
        field_layout: Layout,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        // May advance `latest_offset` to align; returns true when this field
        // will be coalesced into the previous bitfield unit's tail.
        let will_merge_with_bitfield = self.align_to_latest_field(field_layout);

        let is_union = self.comp.is_union();
        let padding_bytes = match field_offset {
            // If the known bit offset is ahead of our bookkeeping, trust it
            // and pad the difference.
            Some(offset) if offset / 8 > self.latest_offset => {
                offset / 8 - self.latest_offset
            }
            _ => {
                if will_merge_with_bitfield ||
                    field_layout.align == 0 ||
                    is_union
                {
                    0
                } else if !self.is_packed {
                    self.padding_bytes(field_layout)
                } else if let Some(l) = self.known_type_layout {
                    self.padding_bytes(l)
                } else {
                    0
                }
            }
        };

        self.latest_offset += padding_bytes;

        // Decide whether the padding is worth emitting as an actual field.
        let padding_layout = if self.is_packed || is_union {
            None
        } else {
            // Otherwise the padding is useless.
            let need_padding = padding_bytes >= field_layout.align ||
                field_layout.align > MAX_GUARANTEED_ALIGN;

            debug!(
                "Offset: <padding>: {} -> {}",
                self.latest_offset - padding_bytes,
                self.latest_offset
            );

            debug!(
                "align field {} to {}/{} with {} padding bytes {:?}",
                field_name,
                self.latest_offset,
                field_offset.unwrap_or(0) / 8,
                padding_bytes,
                field_layout
            );

            if need_padding && padding_bytes != 0 {
                Some(Layout::new(
                    padding_bytes,
                    // Never claim more alignment than we can guarantee.
                    cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN),
                ))
            } else {
                None
            }
        };

        // Book the field itself.
        self.latest_offset += field_layout.size;
        self.latest_field_layout = Some(field_layout);
        self.max_field_align =
            cmp::max(self.max_field_align, field_layout.align);
        self.last_field_was_bitfield = false;

        debug!(
            "Offset: {}: {} -> {}",
            field_name,
            self.latest_offset - field_layout.size,
            self.latest_offset
        );

        padding_layout.map(|layout| self.padding_field(layout))
    }
264 
pad_struct( &mut self, layout: Layout, ) -> Option<proc_macro2::TokenStream>265     pub fn pad_struct(
266         &mut self,
267         layout: Layout,
268     ) -> Option<proc_macro2::TokenStream> {
269         debug!(
270             "pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}",
271             self, layout
272         );
273 
274         if layout.size < self.latest_offset {
275             warn!(
276                 "Calculated wrong layout for {}, too more {} bytes",
277                 self.name,
278                 self.latest_offset - layout.size
279             );
280             return None;
281         }
282 
283         let padding_bytes = layout.size - self.latest_offset;
284         if padding_bytes == 0 {
285             return None;
286         }
287 
288         let repr_align = self.ctx.options().rust_features().repr_align;
289 
290         // We always pad to get to the correct size if the struct is one of
291         // those we can't align properly.
292         //
293         // Note that if the last field we saw was a bitfield, we may need to pad
294         // regardless, because bitfields don't respect alignment as strictly as
295         // other fields.
296         if padding_bytes >= layout.align ||
297             (self.last_field_was_bitfield &&
298                 padding_bytes >= self.latest_field_layout.unwrap().align) ||
299             (!repr_align && layout.align > MAX_GUARANTEED_ALIGN)
300         {
301             let layout = if self.is_packed {
302                 Layout::new(padding_bytes, 1)
303             } else if self.last_field_was_bitfield ||
304                 layout.align > MAX_GUARANTEED_ALIGN
305             {
306                 // We've already given up on alignment here.
307                 Layout::for_size(self.ctx, padding_bytes)
308             } else {
309                 Layout::new(padding_bytes, layout.align)
310             };
311 
312             debug!("pad bytes to struct {}, {:?}", self.name, layout);
313 
314             Some(self.padding_field(layout))
315         } else {
316             None
317         }
318     }
319 
requires_explicit_align(&self, layout: Layout) -> bool320     pub fn requires_explicit_align(&self, layout: Layout) -> bool {
321         let repr_align = self.ctx.options().rust_features().repr_align;
322 
323         // Always force explicit repr(align) for stuff more than 16-byte aligned
324         // to work-around https://github.com/rust-lang/rust/issues/54341.
325         //
326         // Worst-case this just generates redundant alignment attributes.
327         if repr_align && self.max_field_align >= 16 {
328             return true;
329         }
330 
331         if self.max_field_align >= layout.align {
332             return false;
333         }
334 
335         // We can only generate up-to a 8-bytes of alignment unless we support
336         // repr(align).
337         repr_align || layout.align <= MAX_GUARANTEED_ALIGN
338     }
339 
padding_bytes(&self, layout: Layout) -> usize340     fn padding_bytes(&self, layout: Layout) -> usize {
341         align_to(self.latest_offset, layout.align) - self.latest_offset
342     }
343 
padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream344     fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream {
345         let ty = helpers::blob(self.ctx, layout);
346         let padding_count = self.padding_count;
347 
348         self.padding_count += 1;
349 
350         let padding_field_name = Ident::new(
351             &format!("__bindgen_padding_{}", padding_count),
352             Span::call_site(),
353         );
354 
355         self.max_field_align = cmp::max(self.max_field_align, layout.align);
356 
357         quote! {
358             pub #padding_field_name : #ty ,
359         }
360     }
361 
    /// Returns whether the new field is known to merge with a bitfield.
    ///
    /// As a side effect, advances `latest_offset` to the alignment of the
    /// previous field when no merge happens. This is just to avoid doing the
    /// same check also in `pad_field`.
    fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
        if self.is_packed {
            // Skip to align fields when packed.
            return false;
        }

        // Nothing seen yet, so nothing to align to.
        let layout = match self.latest_field_layout {
            Some(l) => l,
            None => return false,
        };

        // If the previous field was a bitfield unit, we may or may not need to
        // align, depending on what the current field alignment and the
        // bitfield size and alignment are.
        debug!(
            "align_to_bitfield? {}: {:?} {:?}",
            self.last_field_was_bitfield, layout, new_field_layout
        );

        // Avoid divide-by-zero errors if align is 0.
        let align = cmp::max(1, layout.align);

        // `layout.size % align` is the tail slack of the previous bitfield
        // unit; a field small enough (in both size and alignment) to fit
        // there gets coalesced into it.
        if self.last_field_was_bitfield &&
            new_field_layout.align <= layout.size % align &&
            new_field_layout.size <= layout.size % align
        {
            // The new field will be coalesced into some of the remaining bits.
            //
            // FIXME(emilio): I think this may not catch everything?
            debug!("Will merge with bitfield");
            return true;
        }

        // Else, just align the obvious way.
        self.latest_offset += self.padding_bytes(layout);
        return false;
    }
401 }
402