//! Helpers for code generation that need struct layout

use super::helpers;

use crate::ir::comp::CompInfo;
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
use crate::ir::ty::{Type, TypeKind};
use crate::FieldVisibilityKind;
use proc_macro2::{self, Ident, Span};
use std::cmp;

const MAX_GUARANTEED_ALIGN: usize = 8;

/// Tracks the layout of a struct while its fields are being generated, so
/// that explicit padding fields can be inserted where needed.
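///
/// A minimal sketch of how codegen typically drives the tracker (the variable
/// names here are hypothetical; the real call sites live in the codegen
/// module):
///
/// ```ignore
/// let mut tracker =
///     StructLayoutTracker::new(ctx, comp, ty, "Foo", visibility, is_packed);
///
/// // Each field may require a padding field to be emitted *before* it.
/// if let Some(padding) = tracker.saw_field("bar", bar_ty, Some(bar_offset)) {
///     fields.push(padding);
/// }
///
/// // Once all fields have been seen, pad up to the known struct size.
/// if let Some(padding) = tracker.pad_struct(layout) {
///     fields.push(padding);
/// }
/// ```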
#[derive(Debug)]
pub(crate) struct StructLayoutTracker<'a> {
    name: &'a str,
    ctx: &'a BindgenContext,
    comp: &'a CompInfo,
    is_packed: bool,
    known_type_layout: Option<Layout>,
    is_rust_union: bool,
    can_copy_union_fields: bool,
    latest_offset: usize,
    padding_count: usize,
    latest_field_layout: Option<Layout>,
    max_field_align: usize,
    last_field_was_bitfield: bool,
    visibility: FieldVisibilityKind,
}

/// Returns `size` rounded up to a multiple of `align` (or `size` itself if
/// `align` is zero).
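///
/// For example, `align_to(17, 4)` returns 20, and `align_to(5, 1)` returns 5.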
pub(crate) fn align_to(size: usize, align: usize) -> usize {
    if align == 0 {
        return size;
    }

    let rem = size % align;
    if rem == 0 {
        return size;
    }

    size + align - rem
}

/// Returns the smallest power-of-two number of bytes that can hold `n` bits.
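///
/// For example, 9 bits round up to 16 bits, so `bytes_from_bits_pow2(9)`
/// returns 2.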
pub(crate) fn bytes_from_bits_pow2(mut n: usize) -> usize {
    if n == 0 {
        return 0;
    }

    if n <= 8 {
        return 1;
    }

    if !n.is_power_of_two() {
        n = n.next_power_of_two();
    }

    n / 8
}

#[test]
fn test_align_to() {
    assert_eq!(align_to(1, 1), 1);
    assert_eq!(align_to(1, 2), 2);
    assert_eq!(align_to(1, 4), 4);
    assert_eq!(align_to(5, 1), 5);
    assert_eq!(align_to(17, 4), 20);
}

#[test]
fn test_bytes_from_bits_pow2() {
    assert_eq!(bytes_from_bits_pow2(0), 0);
    for i in 1..9 {
        assert_eq!(bytes_from_bits_pow2(i), 1);
    }
    for i in 9..17 {
        assert_eq!(bytes_from_bits_pow2(i), 2);
    }
    for i in 17..33 {
        assert_eq!(bytes_from_bits_pow2(i), 4);
    }
}

impl<'a> StructLayoutTracker<'a> {
    pub(crate) fn new(
        ctx: &'a BindgenContext,
        comp: &'a CompInfo,
        ty: &'a Type,
        name: &'a str,
        visibility: FieldVisibilityKind,
        is_packed: bool,
    ) -> Self {
        let known_type_layout = ty.layout(ctx);
        let (is_rust_union, can_copy_union_fields) =
            comp.is_rust_union(ctx, known_type_layout.as_ref(), name);
        StructLayoutTracker {
            name,
            ctx,
            comp,
            visibility,
            is_packed,
            known_type_layout,
            is_rust_union,
            can_copy_union_fields,
            latest_offset: 0,
            padding_count: 0,
            latest_field_layout: None,
            max_field_align: 0,
            last_field_was_bitfield: false,
        }
    }

    pub(crate) fn can_copy_union_fields(&self) -> bool {
        self.can_copy_union_fields
    }

    pub(crate) fn is_rust_union(&self) -> bool {
        self.is_rust_union
    }

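    /// Accounts for the implicit vtable pointer at the start of the struct.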
    pub(crate) fn saw_vtable(&mut self) {
        debug!("saw vtable for {}", self.name);

        let ptr_size = self.ctx.target_pointer_size();
        self.latest_offset += ptr_size;
        self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size));
        self.max_field_align = ptr_size;
    }

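    /// Accounts for a base class sub-object with the given type's layout.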
    pub(crate) fn saw_base(&mut self, base_ty: &Type) {
        debug!("saw base for {}", self.name);
        if let Some(layout) = base_ty.layout(self.ctx) {
            self.align_to_latest_field(layout);

            self.latest_offset += self.padding_bytes(layout) + layout.size;
            self.latest_field_layout = Some(layout);
            self.max_field_align = cmp::max(self.max_field_align, layout.align);
        }
    }

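    /// Accounts for a bitfield allocation unit with the given layout.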
    pub(crate) fn saw_bitfield_unit(&mut self, layout: Layout) {
        debug!("saw bitfield unit for {}: {:?}", self.name, layout);

        self.align_to_latest_field(layout);

        self.latest_offset += layout.size;

        debug!(
            "Offset: <bitfield>: {} -> {}",
            self.latest_offset - layout.size,
            self.latest_offset
        );

        self.latest_field_layout = Some(layout);
        self.last_field_was_bitfield = true;
        self.max_field_align = cmp::max(self.max_field_align, layout.align);
    }

    /// Returns a padding field if necessary for a given new field _before_
    /// adding that field.
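    ///
    /// For example (a sketch assuming default options): if the tracker is at
    /// offset 4 and the next field is a `u32` whose recorded offset is byte
    /// 16, this returns a 12-byte `__bindgen_padding_N` blob and accounts for
    /// it, and then for the field itself.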
    pub(crate) fn saw_field(
        &mut self,
        field_name: &str,
        field_ty: &Type,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let mut field_layout = field_ty.layout(self.ctx)?;

        if let TypeKind::Array(inner, len) =
            *field_ty.canonical_type(self.ctx).kind()
        {
            // FIXME(emilio): As an _ultra_ hack, we correct the layout for
            // arrays of structs that have a bigger alignment than we can
            // support.
            //
            // This means that the structs in the array are super-unsafe to
            // access, since they won't be properly aligned, but there's not
            // too much we can do about it.
            if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx)
            {
                if layout.align > MAX_GUARANTEED_ALIGN {
                    field_layout.size =
                        align_to(layout.size, layout.align) * len;
                    field_layout.align = MAX_GUARANTEED_ALIGN;
                }
            }
        }
        self.saw_field_with_layout(field_name, field_layout, field_offset)
    }

    pub(crate) fn saw_field_with_layout(
        &mut self,
        field_name: &str,
        field_layout: Layout,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let will_merge_with_bitfield = self.align_to_latest_field(field_layout);

        let is_union = self.comp.is_union();
        let padding_bytes = match field_offset {
            Some(offset) if offset / 8 > self.latest_offset => {
                offset / 8 - self.latest_offset
            }
            _ => {
                if will_merge_with_bitfield ||
                    field_layout.align == 0 ||
                    is_union
                {
                    0
                } else if !self.is_packed {
                    self.padding_bytes(field_layout)
                } else if let Some(mut l) = self.known_type_layout {
                    if field_layout.align < l.align {
                        l.align = field_layout.align;
                    }
                    self.padding_bytes(l)
                } else {
                    0
                }
            }
        };

        self.latest_offset += padding_bytes;

        let padding_layout = if self.is_packed || is_union {
            None
        } else {
            let force_padding = self.ctx.options().force_explicit_padding;

            // Otherwise the padding is useless, because Rust's own alignment
            // of the field type would introduce it anyway.
            let need_padding = force_padding ||
                padding_bytes >= field_layout.align ||
                field_layout.align > MAX_GUARANTEED_ALIGN;

            debug!(
                "Offset: <padding>: {} -> {}",
                self.latest_offset - padding_bytes,
                self.latest_offset
            );

            debug!(
                "align field {} to {}/{} with {} padding bytes {:?}",
                field_name,
                self.latest_offset,
                field_offset.unwrap_or(0) / 8,
                padding_bytes,
                field_layout
            );

            let padding_align = if force_padding {
                1
            } else {
                cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN)
            };

            if need_padding && padding_bytes != 0 {
                Some(Layout::new(padding_bytes, padding_align))
            } else {
                None
            }
        };

        self.latest_offset += field_layout.size;
        self.latest_field_layout = Some(field_layout);
        self.max_field_align =
            cmp::max(self.max_field_align, field_layout.align);
        self.last_field_was_bitfield = false;

        debug!(
            "Offset: {}: {} -> {}",
            field_name,
            self.latest_offset - field_layout.size,
            self.latest_offset
        );

        padding_layout.map(|layout| self.padding_field(layout))
    }

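    /// Returns an explicit padding field covering the gap between the last
    /// field and the declared size of the struct, when the user asked for
    /// explicit padding.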
    pub(crate) fn add_tail_padding(
        &mut self,
        comp_name: &str,
        comp_layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        // Only emit a padding field at the end of a struct if the user
        // configured explicit padding.
        if !self.ctx.options().force_explicit_padding {
            return None;
        }

        // Padding doesn't make sense for Rust unions.
        if self.is_rust_union {
            return None;
        }

        if self.latest_offset == comp_layout.size {
            // This struct does not contain tail padding.
            return None;
        }

        trace!(
            "need a tail padding field for {}: offset {} -> size {}",
            comp_name,
            self.latest_offset,
            comp_layout.size
        );
        let size = comp_layout.size - self.latest_offset;
        Some(self.padding_field(Layout::new(size, 0)))
    }

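    /// Returns a trailing padding field that grows the generated struct up to
    /// the size of its known layout, if one is needed.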
    pub(crate) fn pad_struct(
        &mut self,
        layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        debug!(
            "pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}",
            self, layout
        );

        if layout.size < self.latest_offset {
            warn!(
                "Calculated wrong layout for {}, it overflows by {} bytes",
                self.name,
                self.latest_offset - layout.size
            );
            return None;
        }

        let padding_bytes = layout.size - self.latest_offset;
        if padding_bytes == 0 {
            return None;
        }

        let repr_align = self.ctx.options().rust_features().repr_align;

        // We always pad to get to the correct size if the struct is one of
        // those we can't align properly.
        //
        // Note that if the last field we saw was a bitfield, we may need to
        // pad regardless, because bitfields don't respect alignment as
        // strictly as other fields.
        if padding_bytes >= layout.align ||
            (self.last_field_was_bitfield &&
                padding_bytes >= self.latest_field_layout.unwrap().align) ||
            (!repr_align && layout.align > MAX_GUARANTEED_ALIGN)
        {
            let layout = if self.is_packed {
                Layout::new(padding_bytes, 1)
            } else if self.last_field_was_bitfield ||
                layout.align > MAX_GUARANTEED_ALIGN
            {
                // We've already given up on alignment here.
                Layout::for_size(self.ctx, padding_bytes)
            } else {
                Layout::new(padding_bytes, layout.align)
            };

            debug!("pad bytes to struct {}, {:?}", self.name, layout);

            Some(self.padding_field(layout))
        } else {
            None
        }
    }

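    /// Returns whether the generated struct needs an explicit alignment
    /// annotation to match the layout computed by the C/C++ compiler.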
    pub(crate) fn requires_explicit_align(&self, layout: Layout) -> bool {
        let repr_align = self.ctx.options().rust_features().repr_align;

        // Always force explicit repr(align) for anything with 16-byte
        // alignment or more, to work around
        // https://github.com/rust-lang/rust/issues/54341.
        //
        // Worst case, this just generates redundant alignment attributes.
        if repr_align && self.max_field_align >= 16 {
            return true;
        }

        if self.max_field_align >= layout.align {
            return false;
        }

        // We can only guarantee up to 8 bytes of alignment unless we support
        // repr(align).
        repr_align || layout.align <= MAX_GUARANTEED_ALIGN
    }

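    /// Returns the number of padding bytes needed to round `latest_offset` up
    /// to a multiple of `layout.align`.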
    fn padding_bytes(&self, layout: Layout) -> usize {
        align_to(self.latest_offset, layout.align) - self.latest_offset
    }

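    /// Generates a `__bindgen_padding_N` blob field with the given layout,
    /// bumping the padding counter and the maximum field alignment.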
    fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream {
        let ty = helpers::blob(self.ctx, layout);
        let padding_count = self.padding_count;

        self.padding_count += 1;

        let padding_field_name = Ident::new(
            &format!("__bindgen_padding_{}", padding_count),
            Span::call_site(),
        );

        self.max_field_align = cmp::max(self.max_field_align, layout.align);

        let vis = super::access_specifier(self.visibility);

        quote! {
            #vis #padding_field_name : #ty ,
        }
    }

    /// Returns whether the new field is known to merge with a bitfield.
    ///
    /// This is just to avoid doing the same check again when computing
    /// padding.
    fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
        if self.is_packed {
            // Skip aligning fields when the struct is packed.
            return false;
        }

        let layout = match self.latest_field_layout {
            Some(l) => l,
            None => return false,
        };

        // If the last field was a bitfield, we may or may not need to align,
        // depending on the new field's alignment and the bitfield unit's size
        // and alignment.
        debug!(
            "align_to_bitfield? {}: {:?} {:?}",
            self.last_field_was_bitfield, layout, new_field_layout
        );

        // Avoid divide-by-zero errors if align is 0.
        let align = cmp::max(1, layout.align);

        if self.last_field_was_bitfield &&
            new_field_layout.align <= layout.size % align &&
            new_field_layout.size <= layout.size % align
        {
            // The new field will be coalesced into some of the remaining bits.
            //
            // FIXME(emilio): I think this may not catch everything?
            debug!("Will merge with bitfield");
            return true;
        }

        // Else, just align the obvious way.
        self.latest_offset += self.padding_bytes(layout);
        false
    }
}