use std::{
    num::NonZeroUsize,
    ops::{Add, AddAssign},
};

use crate::{FlagsRepr, Int, Resolve, Type, TypeDef, TypeDefKind};

/// Architecture specific alignment
#[derive(Eq, PartialEq, Clone, Copy)]
pub enum Alignment {
    /// This represents 4 byte alignment on 32-bit and 8 byte alignment on 64-bit architectures
    Pointer,
    /// This alignment is architecture independent (derived from integer or float types)
    Bytes(NonZeroUsize),
}

impl Default for Alignment {
    fn default() -> Self {
        Alignment::Bytes(NonZeroUsize::new(1).unwrap())
    }
}

impl std::fmt::Debug for Alignment {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Alignment::Pointer => f.write_str("ptr"),
            Alignment::Bytes(b) => f.write_fmt(format_args!("{}", b.get())),
        }
    }
}

impl Ord for Alignment {
    /// Needed for determining the max alignment of an object from its parts.
    /// The ordering is: Bytes(1) < Bytes(2) < Bytes(4) < Pointer < Bytes(8),
    /// as a Pointer is either four or eight byte aligned, depending on the architecture.
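    ///
    /// A sketch of how this drives `max` (marked `ignore`; it assumes the items
    /// are in scope):
    /// ```ignore
    /// let four = Alignment::Bytes(NonZeroUsize::new(4).unwrap());
    /// let eight = Alignment::Bytes(NonZeroUsize::new(8).unwrap());
    /// assert_eq!(four.max(Alignment::Pointer), Alignment::Pointer);
    /// assert_eq!(Alignment::Pointer.max(eight), eight);
    /// ```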
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        match (self, other) {
            (Alignment::Pointer, Alignment::Pointer) => std::cmp::Ordering::Equal,
            (Alignment::Pointer, Alignment::Bytes(b)) => {
                if b.get() > 4 {
                    std::cmp::Ordering::Less
                } else {
                    std::cmp::Ordering::Greater
                }
            }
            (Alignment::Bytes(b), Alignment::Pointer) => {
                if b.get() > 4 {
                    std::cmp::Ordering::Greater
                } else {
                    std::cmp::Ordering::Less
                }
            }
            (Alignment::Bytes(a), Alignment::Bytes(b)) => a.cmp(b),
        }
    }
}

impl PartialOrd for Alignment {
    /// Delegate to `Ord` so the comparison operators agree with `cmp` above
    /// (a derived `PartialOrd` would order `Pointer` below every `Bytes` value).
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Alignment {
    /// For easy migration this gives you the value for wasm32
    pub fn align_wasm32(&self) -> usize {
        match self {
            Alignment::Pointer => 4,
            Alignment::Bytes(bytes) => bytes.get(),
        }
    }

    /// The value for wasm64
    pub fn align_wasm64(&self) -> usize {
        match self {
            Alignment::Pointer => 8,
            Alignment::Bytes(bytes) => bytes.get(),
        }
    }

    /// Create an alignment expression in bytes from an expression for the pointer size
    pub fn format(&self, ptrsize_expr: &str) -> String {
        match self {
            Alignment::Pointer => ptrsize_expr.into(),
            Alignment::Bytes(bytes) => format!("{}", bytes.get()),
        }
    }
}

/// Architecture specific measurement of position,
/// the combined amount in bytes is
/// `bytes + pointers * core::mem::size_of::<*const u8>()`
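///
/// A minimal sketch of that arithmetic (marked `ignore`; it assumes the type is
/// in scope):
/// ```ignore
/// // four plain bytes plus one pointer-sized slot
/// let size = ArchitectureSize::new(4, 1);
/// assert_eq!(size.size_wasm32(), 4 + 1 * 4); // 8 bytes with 32-bit pointers
/// assert_eq!(size.size_wasm64(), 4 + 1 * 8); // 12 bytes with 64-bit pointers
/// ```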
#[derive(Default, Clone, Copy, Eq, PartialEq)]
pub struct ArchitectureSize {
    /// architecture independent bytes
    pub bytes: usize,
    /// amount of pointer sized units to add
    pub pointers: usize,
}

impl Add<ArchitectureSize> for ArchitectureSize {
    type Output = ArchitectureSize;

    fn add(self, rhs: ArchitectureSize) -> Self::Output {
        ArchitectureSize::new(self.bytes + rhs.bytes, self.pointers + rhs.pointers)
    }
}

impl AddAssign<ArchitectureSize> for ArchitectureSize {
    fn add_assign(&mut self, rhs: ArchitectureSize) {
        self.bytes += rhs.bytes;
        self.pointers += rhs.pointers;
    }
}

impl From<Alignment> for ArchitectureSize {
    fn from(align: Alignment) -> Self {
        match align {
            Alignment::Bytes(bytes) => ArchitectureSize::new(bytes.get(), 0),
            Alignment::Pointer => ArchitectureSize::new(0, 1),
        }
    }
}

impl std::fmt::Debug for ArchitectureSize {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.format("ptrsz"))
    }
}

impl ArchitectureSize {
    pub fn new(bytes: usize, pointers: usize) -> Self {
        Self { bytes, pointers }
    }

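    /// Returns a size large enough to hold both `self` and `other` on 32-bit
    /// and 64-bit architectures.
    ///
    /// A sketch of the mixed case (marked `ignore`; it mirrors the test below):
    /// ```ignore
    /// // 12 plain bytes vs. two pointers combine into 8 bytes plus one pointer,
    /// // i.e. 12 bytes on wasm32 and 16 bytes on wasm64
    /// let a = ArchitectureSize::new(12, 0);
    /// let b = ArchitectureSize::new(0, 2);
    /// assert_eq!(a.max(&b), ArchitectureSize::new(8, 1));
    /// ```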
    pub fn max<B: std::borrow::Borrow<Self>>(&self, other: B) -> Self {
        let other = other.borrow();
        let self32 = self.size_wasm32();
        let self64 = self.size_wasm64();
        let other32 = other.size_wasm32();
        let other64 = other.size_wasm64();
        if self32 >= other32 && self64 >= other64 {
            *self
        } else if self32 <= other32 && self64 <= other64 {
            *other
        } else {
            // we can assume a combination of bytes and pointers, so align to at least pointer size
            let new32 = align_to(self32.max(other32), 4);
            let new64 = align_to(self64.max(other64), 8);
            ArchitectureSize::new(new32 + new32 - new64, (new64 - new32) / 4)
        }
    }

    pub fn add_bytes(&self, b: usize) -> Self {
        Self::new(self.bytes + b, self.pointers)
    }

    /// The effective offset/size is
    /// `constant_bytes() + core::mem::size_of::<*const u8>() * pointers_to_add()`
    pub fn constant_bytes(&self) -> usize {
        self.bytes
    }

    pub fn pointers_to_add(&self) -> usize {
        self.pointers
    }

    /// Shortcut for compatibility with previous versions
    pub fn size_wasm32(&self) -> usize {
        self.bytes + self.pointers * 4
    }

    pub fn size_wasm64(&self) -> usize {
        self.bytes + self.pointers * 8
    }

    /// Prefer this over comparing the size against zero
    pub fn is_empty(&self) -> bool {
        self.bytes == 0 && self.pointers == 0
    }

    /// Create a suitable expression in bytes from a pointer size argument
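    ///
    /// For example (illustrative), `ArchitectureSize::new(4, 2).format("PTR")`
    /// yields `"(4+2*PTR)"`, while a plain byte size is emitted as a bare number.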
    pub fn format(&self, ptrsize_expr: &str) -> String {
        if self.pointers != 0 {
            if self.bytes > 0 {
                // both bytes and pointers
                format!(
                    "({}+{}*{ptrsize_expr})",
                    self.constant_bytes(),
                    self.pointers_to_add()
                )
            } else if self.pointers == 1 {
                // exactly one pointer
                ptrsize_expr.into()
            } else {
                // only pointers
                format!("({}*{ptrsize_expr})", self.pointers_to_add())
            }
        } else {
            // only bytes
            format!("{}", self.constant_bytes())
        }
    }
}

/// Information per structure element
#[derive(Default)]
pub struct ElementInfo {
    pub size: ArchitectureSize,
    pub align: Alignment,
}

impl From<Alignment> for ElementInfo {
    fn from(align: Alignment) -> Self {
        ElementInfo {
            size: align.into(),
            align,
        }
    }
}

impl ElementInfo {
    fn new(size: ArchitectureSize, align: Alignment) -> Self {
        Self { size, align }
    }
}

/// Collect size and alignment for sub-elements of a structure
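///
/// A hedged usage sketch (marked `ignore`; it assumes a populated `Resolve` is
/// available from elsewhere):
/// ```ignore
/// let mut sizes = SizeAlign::default();
/// sizes.fill(&resolve);
/// // a string lowers to two pointer-sized values (data pointer + length)
/// assert_eq!(sizes.size(&Type::String), ArchitectureSize::new(0, 2));
/// assert_eq!(sizes.align(&Type::String), Alignment::Pointer);
/// ```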
#[derive(Default)]
pub struct SizeAlign {
    map: Vec<ElementInfo>,
}

impl SizeAlign {
    pub fn fill(&mut self, resolve: &Resolve) {
        self.map = Vec::new();
        for (_, ty) in resolve.types.iter() {
            let pair = self.calculate(ty);
            self.map.push(pair);
        }
    }

    fn calculate(&self, ty: &TypeDef) -> ElementInfo {
        match &ty.kind {
            TypeDefKind::Type(t) => ElementInfo::new(self.size(t), self.align(t)),
            TypeDefKind::List(_) => {
                ElementInfo::new(ArchitectureSize::new(0, 2), Alignment::Pointer)
            }
            TypeDefKind::Record(r) => self.record(r.fields.iter().map(|f| &f.ty)),
            TypeDefKind::Tuple(t) => self.record(t.types.iter()),
            TypeDefKind::Flags(f) => match f.repr() {
                FlagsRepr::U8 => int_size_align(Int::U8),
                FlagsRepr::U16 => int_size_align(Int::U16),
                FlagsRepr::U32(n) => ElementInfo::new(
                    ArchitectureSize::new(n * 4, 0),
                    Alignment::Bytes(NonZeroUsize::new(4).unwrap()),
                ),
            },
            TypeDefKind::Variant(v) => self.variant(v.tag(), v.cases.iter().map(|c| c.ty.as_ref())),
            TypeDefKind::Enum(e) => self.variant(e.tag(), []),
            TypeDefKind::Option(t) => self.variant(Int::U8, [Some(t)]),
            TypeDefKind::Result(r) => self.variant(Int::U8, [r.ok.as_ref(), r.err.as_ref()]),
            // A resource is represented as an index.
            // A future is represented as an index.
            // A stream is represented as an index.
            // An error is represented as an index.
            TypeDefKind::Handle(_)
            | TypeDefKind::Future(_)
            | TypeDefKind::Stream(_)
            | TypeDefKind::ErrorContext => int_size_align(Int::U32),
            // This shouldn't be used for anything since raw resources aren't part of the ABI -- just handles to
            // them.
            TypeDefKind::Resource => ElementInfo::new(
                ArchitectureSize::new(usize::MAX, 0),
                Alignment::Bytes(NonZeroUsize::new(usize::MAX).unwrap()),
            ),
            TypeDefKind::Unknown => unreachable!(),
        }
    }

    pub fn size(&self, ty: &Type) -> ArchitectureSize {
        match ty {
            Type::Bool | Type::U8 | Type::S8 => ArchitectureSize::new(1, 0),
            Type::U16 | Type::S16 => ArchitectureSize::new(2, 0),
            Type::U32 | Type::S32 | Type::F32 | Type::Char => ArchitectureSize::new(4, 0),
            Type::U64 | Type::S64 | Type::F64 => ArchitectureSize::new(8, 0),
            Type::String => ArchitectureSize::new(0, 2),
            Type::Id(id) => self.map[id.index()].size,
        }
    }

    pub fn align(&self, ty: &Type) -> Alignment {
        match ty {
            Type::Bool | Type::U8 | Type::S8 => Alignment::Bytes(NonZeroUsize::new(1).unwrap()),
            Type::U16 | Type::S16 => Alignment::Bytes(NonZeroUsize::new(2).unwrap()),
            Type::U32 | Type::S32 | Type::F32 | Type::Char => {
                Alignment::Bytes(NonZeroUsize::new(4).unwrap())
            }
            Type::U64 | Type::S64 | Type::F64 => Alignment::Bytes(NonZeroUsize::new(8).unwrap()),
            Type::String => Alignment::Pointer,
            Type::Id(id) => self.map[id.index()].align,
        }
    }

    /// Calculate the aligned offset of each field in a sequentially laid out
    /// record, returned together with the field's type.
    pub fn field_offsets<'a>(
        &self,
        types: impl IntoIterator<Item = &'a Type>,
    ) -> Vec<(ArchitectureSize, &'a Type)> {
        let mut cur = ArchitectureSize::default();
        types
            .into_iter()
            .map(|ty| {
                let ret = align_to_arch(cur, self.align(ty));
                cur = ret + self.size(ty);
                (ret, ty)
            })
            .collect()
    }

    /// Calculate the offset of the payload in a variant: the tag size aligned
    /// up to the maximum alignment of the cases.
    pub fn payload_offset<'a>(
        &self,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'a Type>>,
    ) -> ArchitectureSize {
        let mut max_align = Alignment::default();
        for ty in cases {
            if let Some(ty) = ty {
                max_align = max_align.max(self.align(ty));
            }
        }
        let tag_size = int_size_align(tag).size;
        align_to_arch(tag_size, max_align)
    }

    /// Calculate the combined size and maximum alignment of a sequence of fields
    /// (used for records and tuples).
    pub fn record<'a>(&self, types: impl Iterator<Item = &'a Type>) -> ElementInfo {
        let mut size = ArchitectureSize::default();
        let mut align = Alignment::default();
        for ty in types {
            let field_size = self.size(ty);
            let field_align = self.align(ty);
            size = align_to_arch(size, field_align) + field_size;
            align = align.max(field_align);
        }
        ElementInfo::new(align_to_arch(size, align), align)
    }

    pub fn params<'a>(&self, types: impl IntoIterator<Item = &'a Type>) -> ElementInfo {
        self.record(types.into_iter())
    }

    fn variant<'a>(
        &self,
        tag: Int,
        types: impl IntoIterator<Item = Option<&'a Type>>,
    ) -> ElementInfo {
        let ElementInfo {
            size: discrim_size,
            align: discrim_align,
        } = int_size_align(tag);
        let mut case_size = ArchitectureSize::default();
        let mut case_align = Alignment::default();
        for ty in types {
            if let Some(ty) = ty {
                case_size = case_size.max(&self.size(ty));
                case_align = case_align.max(self.align(ty));
            }
        }
        let align = discrim_align.max(case_align);
        let discrim_aligned = align_to_arch(discrim_size, case_align);
        let size_sum = discrim_aligned + case_size;
        ElementInfo::new(align_to_arch(size_sum, align), align)
    }
}

fn int_size_align(i: Int) -> ElementInfo {
    match i {
        Int::U8 => Alignment::Bytes(NonZeroUsize::new(1).unwrap()),
        Int::U16 => Alignment::Bytes(NonZeroUsize::new(2).unwrap()),
        Int::U32 => Alignment::Bytes(NonZeroUsize::new(4).unwrap()),
        Int::U64 => Alignment::Bytes(NonZeroUsize::new(8).unwrap()),
    }
    .into()
}

/// Increase `val` to a multiple of `align`;
/// `align` must be a power of two
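///
/// For example (illustrative), `align_to(10, 4)` returns `12`, while a value that
/// is already a multiple of the alignment, such as `align_to(8, 4)`, is returned
/// unchanged.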
pub(crate) fn align_to(val: usize, align: usize) -> usize {
    (val + align - 1) & !(align - 1)
}

/// Increase `val` to a multiple of `align`, with special handling for pointers;
/// `align` must be a power of two or `Alignment::Pointer`
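///
/// A sketch of the pointer-aware behavior (marked `ignore`; it mirrors the tests
/// below):
/// ```ignore
/// // one byte plus one pointer, aligned to 8 bytes, becomes two pointer-sized
/// // slots: 8 bytes on wasm32 and 16 bytes on wasm64
/// let aligned = align_to_arch(
///     ArchitectureSize::new(1, 1),
///     Alignment::Bytes(NonZeroUsize::new(8).unwrap()),
/// );
/// assert_eq!(aligned, ArchitectureSize::new(0, 2));
/// ```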
pub fn align_to_arch(val: ArchitectureSize, align: Alignment) -> ArchitectureSize {
    match align {
        Alignment::Pointer => {
            let new32 = align_to(val.bytes, 4);
            if new32 != align_to(new32, 8) {
                ArchitectureSize::new(new32 - 4, val.pointers + 1)
            } else {
                ArchitectureSize::new(new32, val.pointers)
            }
        }
        Alignment::Bytes(align_bytes) => {
            let align_bytes = align_bytes.get();
            if align_bytes > 4 && (val.pointers & 1) != 0 {
                let new_bytes = align_to(val.bytes, align_bytes);
                if (new_bytes - val.bytes) >= 4 {
                    // up to four extra bytes fit together with the extra 32 bit pointer
                    // and the 64 bit pointer is always 8 bytes (so no change in value)
                    ArchitectureSize::new(new_bytes - 8, val.pointers + 1)
                } else {
                    // there is no room to combine, so the odd pointer aligns to 8 bytes
                    ArchitectureSize::new(new_bytes + 8, val.pointers - 1)
                }
            } else {
                ArchitectureSize::new(align_to(val.bytes, align_bytes), val.pointers)
            }
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn align() {
        // u8 + ptr
        assert_eq!(
            align_to_arch(ArchitectureSize::new(1, 0), Alignment::Pointer),
            ArchitectureSize::new(0, 1)
        );
        // u8 + u64
        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(1, 0),
                Alignment::Bytes(NonZeroUsize::new(8).unwrap())
            ),
            ArchitectureSize::new(8, 0)
        );
        // u8 + u32
        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(1, 0),
                Alignment::Bytes(NonZeroUsize::new(4).unwrap())
            ),
            ArchitectureSize::new(4, 0)
        );
        // ptr + u64
        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(0, 1),
                Alignment::Bytes(NonZeroUsize::new(8).unwrap())
            ),
            ArchitectureSize::new(8, 0)
        );
        // u32 + ptr
        assert_eq!(
            align_to_arch(ArchitectureSize::new(4, 0), Alignment::Pointer),
            ArchitectureSize::new(0, 1)
        );
        // u32, ptr + u64
        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(0, 2),
                Alignment::Bytes(NonZeroUsize::new(8).unwrap())
            ),
            ArchitectureSize::new(0, 2)
        );
        // ptr, u8 + u64
        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(1, 1),
                Alignment::Bytes(NonZeroUsize::new(8).unwrap())
            ),
            ArchitectureSize::new(0, 2)
        );
        // ptr, u8 + ptr
        assert_eq!(
            align_to_arch(ArchitectureSize::new(1, 1), Alignment::Pointer),
            ArchitectureSize::new(0, 2)
        );
        // ptr, ptr, u8 + u64
        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(1, 2),
                Alignment::Bytes(NonZeroUsize::new(8).unwrap())
            ),
            ArchitectureSize::new(8, 2)
        );
        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(30, 3),
                Alignment::Bytes(NonZeroUsize::new(8).unwrap())
            ),
            ArchitectureSize::new(40, 2)
        );

        assert_eq!(
            ArchitectureSize::new(12, 0).max(&ArchitectureSize::new(0, 2)),
            ArchitectureSize::new(8, 1)
        );
        assert_eq!(
            ArchitectureSize::new(10, 0).max(&ArchitectureSize::new(0, 2)),
            ArchitectureSize::new(8, 1)
        );

        assert_eq!(
            align_to_arch(
                ArchitectureSize::new(2, 0),
                Alignment::Bytes(NonZeroUsize::new(8).unwrap())
            ),
            ArchitectureSize::new(8, 0)
        );
        assert_eq!(
            align_to_arch(ArchitectureSize::new(2, 0), Alignment::Pointer),
            ArchitectureSize::new(0, 1)
        );
    }

    #[test]
    fn resource_size() {
        // keep it identical to the old behavior
        let obj = SizeAlign::default();
        let elem = obj.calculate(&TypeDef {
            name: None,
            kind: TypeDefKind::Resource,
            owner: crate::TypeOwner::None,
            docs: Default::default(),
            stability: Default::default(),
        });
        assert_eq!(elem.size, ArchitectureSize::new(usize::MAX, 0));
        assert_eq!(
            elem.align,
            Alignment::Bytes(NonZeroUsize::new(usize::MAX).unwrap())
        );
    }

    #[test]
    fn result_ptr_10() {
        let mut obj = SizeAlign::default();
        let mut resolve = Resolve::default();
        let tuple = crate::Tuple {
            types: vec![Type::U16, Type::U16, Type::U16, Type::U16, Type::U16],
        };
        let id = resolve.types.alloc(TypeDef {
            name: None,
            kind: TypeDefKind::Tuple(tuple),
            owner: crate::TypeOwner::None,
            docs: Default::default(),
            stability: Default::default(),
        });
        obj.fill(&resolve);
        let my_result = crate::Result_ {
            ok: Some(Type::String),
            err: Some(Type::Id(id)),
        };
        let elem = obj.calculate(&TypeDef {
            name: None,
            kind: TypeDefKind::Result(my_result),
            owner: crate::TypeOwner::None,
            docs: Default::default(),
            stability: Default::default(),
        });
        assert_eq!(elem.size, ArchitectureSize::new(8, 2));
        assert_eq!(elem.align, Alignment::Pointer);
    }

    #[test]
    fn result_ptr_64bit() {
        let obj = SizeAlign::default();
        let my_record = crate::Record {
            fields: vec![
                crate::Field {
                    name: String::new(),
                    ty: Type::String,
                    docs: Default::default(),
                },
                crate::Field {
                    name: String::new(),
                    ty: Type::U64,
                    docs: Default::default(),
                },
            ],
        };
        let elem = obj.calculate(&TypeDef {
            name: None,
            kind: TypeDefKind::Record(my_record),
            owner: crate::TypeOwner::None,
            docs: Default::default(),
            stability: Default::default(),
        });
        assert_eq!(elem.size, ArchitectureSize::new(8, 2));
        assert_eq!(elem.align, Alignment::Bytes(NonZeroUsize::new(8).unwrap()));
    }
}