use super::*;

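// A single metadata (.winmd) file held in memory: the raw bytes together with the offsets of
// the #Strings and #Blob heaps and the layout of the metadata tables the reader cares about.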
pub struct File {
    pub(crate) reader: *const Reader,
    bytes: Vec<u8>,
    strings: usize,
    blobs: usize,
    tables: [Table; 17],
}

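// A `File` is identified by the address of its byte buffer, so Debug, Hash, Eq and Ord all
// operate on `bytes.as_ptr()` rather than on the buffer contents.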
impl std::fmt::Debug for File {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::write!(f, "{:?}", self.bytes.as_ptr())
    }
}

impl std::hash::Hash for File {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.bytes.as_ptr().hash(state);
    }
}

impl PartialEq for File {
    fn eq(&self, other: &Self) -> bool {
        self.bytes.as_ptr() == other.bytes.as_ptr()
    }
}

impl Eq for File {}

impl Ord for File {
    fn cmp(&self, other: &Self) -> Ordering {
        self.bytes.as_ptr().cmp(&other.bytes.as_ptr())
    }
}

impl PartialOrd for File {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

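// The raw `reader` pointer is the only field that keeps `File` from being `Sync` automatically;
// this impl asserts that sharing a `File` across threads is nevertheless sound.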
unsafe impl Sync for File {}

impl File {
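    // Parses the PE and CLR headers in `bytes` and records the offsets and shapes of the
    // metadata heaps and tables. Returns `None` if the buffer is not valid metadata.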
    pub fn new(bytes: Vec<u8>) -> Option<Self> {
        let mut result = File {
            bytes,
            reader: std::ptr::null(),
            strings: 0,
            blobs: 0,
            tables: Default::default(),
        };

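        // The image must begin with a DOS header whose `e_lfanew` field points at the NT (PE)
        // signature.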
        let dos = result.bytes.view_as::<IMAGE_DOS_HEADER>(0)?;

        if dos.e_magic != IMAGE_DOS_SIGNATURE
            || result.bytes.copy_as::<u32>(dos.e_lfanew as usize)? != IMAGE_NT_SIGNATURE
        {
            return None;
        }

        let file_offset = dos.e_lfanew as usize + std::mem::size_of::<u32>();
        let file = result.bytes.view_as::<IMAGE_FILE_HEADER>(file_offset)?;

        let optional_offset = file_offset + std::mem::size_of::<IMAGE_FILE_HEADER>();

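        // The optional header's magic value distinguishes PE32 from PE32+ images. Either way,
        // grab the COM descriptor (CLR) data directory entry and the section headers that
        // follow the optional header.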
        let (com_virtual_address, sections) = match result.bytes.copy_as::<u16>(optional_offset)? {
            IMAGE_NT_OPTIONAL_HDR32_MAGIC => {
                let optional = result
                    .bytes
                    .view_as::<IMAGE_OPTIONAL_HEADER32>(optional_offset)?;
                (
                    optional.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR as usize]
                        .VirtualAddress,
                    result.bytes.view_as_slice_of::<IMAGE_SECTION_HEADER>(
                        optional_offset + std::mem::size_of::<IMAGE_OPTIONAL_HEADER32>(),
                        file.NumberOfSections as usize,
                    )?,
                )
            }
            IMAGE_NT_OPTIONAL_HDR64_MAGIC => {
                let optional = result
                    .bytes
                    .view_as::<IMAGE_OPTIONAL_HEADER64>(optional_offset)?;
                (
                    optional.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR as usize]
                        .VirtualAddress,
                    result.bytes.view_as_slice_of::<IMAGE_SECTION_HEADER>(
                        optional_offset + std::mem::size_of::<IMAGE_OPTIONAL_HEADER64>(),
                        file.NumberOfSections as usize,
                    )?,
                )
            }
            _ => return None,
        };

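        // The COM descriptor directory's RVA locates the CLR (COR20) header, whose MetaData
        // entry in turn locates the metadata root.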
        let clr = result.bytes.view_as::<IMAGE_COR20_HEADER>(offset_from_rva(
            section_from_rva(sections, com_virtual_address)?,
            com_virtual_address,
        ))?;

        if clr.cb != std::mem::size_of::<IMAGE_COR20_HEADER>() as u32 {
            return None;
        }

        let metadata_offset = offset_from_rva(
            section_from_rva(sections, clr.MetaData.VirtualAddress)?,
            clr.MetaData.VirtualAddress,
        );
        let metadata = result.bytes.view_as::<METADATA_HEADER>(metadata_offset)?;

        if metadata.signature != METADATA_SIGNATURE {
            return None;
        }

        // The METADATA_HEADER struct is not a fixed size, so we have to compute the offsets
        // that follow it a little more carefully.
        let mut view = metadata_offset + metadata.length as usize + 20;
        let mut tables_data: (usize, usize) = (0, 0);

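        // Walk the stream headers that follow the metadata header: each one is a 4-byte offset,
        // a 4-byte size and a null-terminated name padded to a 4-byte boundary. The stream
        // count is the u16 immediately before the first header.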
        for _ in 0..result
            .bytes
            .copy_as::<u16>(metadata_offset + metadata.length as usize + 18)?
        {
            let stream_offset = result.bytes.copy_as::<u32>(view)? as usize;
            let stream_len = result.bytes.copy_as::<u32>(view + 4)? as usize;
            let stream_name = result.bytes.view_as_str(view + 8)?;
            match stream_name {
                b"#Strings" => result.strings = metadata_offset + stream_offset,
                b"#Blob" => result.blobs = metadata_offset + stream_offset,
                b"#~" => tables_data = (metadata_offset + stream_offset, stream_len),
                b"#GUID" => {}
                b"#US" => {}
                rest => panic!("{rest:?}"),
            }
            let mut padding = 4 - stream_name.len() % 4;
            if padding == 0 {
                padding = 4;
            }
            view += 8 + stream_name.len() + padding;
        }

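        // The #~ stream begins with a fixed-size header: the heap-size flags at offset 6 select
        // 2- or 4-byte indexes into the #Strings, #GUID and #Blob heaps, and the u64 at offset 8
        // is a bitmask of which tables are present. Row counts follow at offset 24.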
        let heap_sizes = result.bytes.copy_as::<u8>(tables_data.0 + 6)?;
        let string_index_size = if (heap_sizes & 1) == 1 { 4 } else { 2 };
        let guid_index_size = if ((heap_sizes >> 1) & 1) == 1 { 4 } else { 2 };
        let blob_index_size = if ((heap_sizes >> 2) & 1) == 1 { 4 } else { 2 };
        let valid_bits = result.bytes.copy_as::<u64>(tables_data.0 + 8)?;
        view = tables_data.0 + 24;

        // These tables are unused by the reader, but needed temporarily to calculate sizes and offsets for subsequent tables.
        let unused_empty = Table::default();
        let mut unused_assembly = Table::default();
        let mut unused_assembly_os = Table::default();
        let mut unused_assembly_processor = Table::default();
        let mut unused_assembly_ref_os = Table::default();
        let mut unused_assembly_ref = Table::default();
        let mut unused_assembly_ref_processor = Table::default();
        let mut unused_decl_security = Table::default();
        let mut unused_event = Table::default();
        let mut unused_event_map = Table::default();
        let mut unused_exported_type = Table::default();
        let mut unused_field_layout = Table::default();
        let mut unused_field_marshal = Table::default();
        let mut unused_field_rva = Table::default();
        let mut unused_file = Table::default();
        let mut unused_generic_param_constraint = Table::default();
        let mut unused_manifest_resource = Table::default();
        let mut unused_method_impl = Table::default();
        let mut unused_method_semantics = Table::default();
        let mut unused_method_spec = Table::default();
        let mut unused_property = Table::default();
        let mut unused_property_map = Table::default();
        let mut unused_standalone_sig = Table::default();
        let mut unused_module = Table::default();

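        // Each bit set in `valid_bits` is followed by a 4-byte row count; the arm numbers below
        // are the ECMA-335 metadata table numbers.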
        for i in 0..64 {
            if ((valid_bits >> i) & 1) == 0 {
                continue;
            }

            let len = result.bytes.copy_as::<u32>(view)? as usize;
            view += 4;

            match i {
                0x00 => unused_module.len = len,
                0x01 => result.tables[TypeRef::TABLE].len = len,
                0x02 => result.tables[TypeDef::TABLE].len = len,
                0x04 => result.tables[Field::TABLE].len = len,
                0x06 => result.tables[MethodDef::TABLE].len = len,
                0x08 => result.tables[Param::TABLE].len = len,
                0x09 => result.tables[InterfaceImpl::TABLE].len = len,
                0x0a => result.tables[MemberRef::TABLE].len = len,
                0x0b => result.tables[Constant::TABLE].len = len,
                0x0c => result.tables[Attribute::TABLE].len = len,
                0x0d => unused_field_marshal.len = len,
                0x0e => unused_decl_security.len = len,
                0x0f => result.tables[ClassLayout::TABLE].len = len,
                0x10 => unused_field_layout.len = len,
                0x11 => unused_standalone_sig.len = len,
                0x12 => unused_event_map.len = len,
                0x14 => unused_event.len = len,
                0x15 => unused_property_map.len = len,
                0x17 => unused_property.len = len,
                0x18 => unused_method_semantics.len = len,
                0x19 => unused_method_impl.len = len,
                0x1a => result.tables[ModuleRef::TABLE].len = len,
                0x1b => result.tables[TypeSpec::TABLE].len = len,
                0x1c => result.tables[ImplMap::TABLE].len = len,
                0x1d => unused_field_rva.len = len,
                0x20 => unused_assembly.len = len,
                0x21 => unused_assembly_processor.len = len,
                0x22 => unused_assembly_os.len = len,
                0x23 => unused_assembly_ref.len = len,
                0x24 => unused_assembly_ref_processor.len = len,
                0x25 => unused_assembly_ref_os.len = len,
                0x26 => unused_file.len = len,
                0x27 => unused_exported_type.len = len,
                0x28 => unused_manifest_resource.len = len,
                0x29 => result.tables[NestedClass::TABLE].len = len,
                0x2a => result.tables[GenericParam::TABLE].len = len,
                0x2b => unused_method_spec.len = len,
                0x2c => unused_generic_param_constraint.len = len,
                _ => unreachable!(),
            };
        }

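        // A coded index can point into any one of a fixed group of tables, so its width depends
        // on the row count of the largest table in the group (see `coded_index_size` below).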
        let tables = &result.tables;
        let type_def_or_ref = coded_index_size(&[
            tables[TypeDef::TABLE].len,
            tables[TypeRef::TABLE].len,
            tables[TypeSpec::TABLE].len,
        ]);
        let has_constant = coded_index_size(&[
            tables[Field::TABLE].len,
            tables[Param::TABLE].len,
            unused_property.len,
        ]);
        let has_field_marshal =
            coded_index_size(&[tables[Field::TABLE].len, tables[Param::TABLE].len]);
        let has_decl_security = coded_index_size(&[
            tables[TypeDef::TABLE].len,
            tables[MethodDef::TABLE].len,
            unused_assembly.len,
        ]);
        let member_ref_parent = coded_index_size(&[
            tables[TypeDef::TABLE].len,
            tables[TypeRef::TABLE].len,
            tables[ModuleRef::TABLE].len,
            tables[MethodDef::TABLE].len,
            tables[TypeSpec::TABLE].len,
        ]);
        let has_semantics = coded_index_size(&[unused_event.len, unused_property.len]);
        let method_def_or_ref =
            coded_index_size(&[tables[MethodDef::TABLE].len, tables[MemberRef::TABLE].len]);
        let member_forwarded =
            coded_index_size(&[tables[Field::TABLE].len, tables[MethodDef::TABLE].len]);
        let implementation = coded_index_size(&[
            unused_file.len,
            unused_assembly_ref.len,
            unused_exported_type.len,
        ]);
        let custom_attribute_type = coded_index_size(&[
            tables[MethodDef::TABLE].len,
            tables[MemberRef::TABLE].len,
            unused_empty.len,
            unused_empty.len,
            unused_empty.len,
        ]);
        let resolution_scope = coded_index_size(&[
            unused_module.len,
            tables[ModuleRef::TABLE].len,
            unused_assembly_ref.len,
            tables[TypeRef::TABLE].len,
        ]);
        let type_or_method_def =
            coded_index_size(&[tables[TypeDef::TABLE].len, tables[MethodDef::TABLE].len]);

        let has_custom_attribute = coded_index_size(&[
            tables[MethodDef::TABLE].len,
            tables[Field::TABLE].len,
            tables[TypeRef::TABLE].len,
            tables[TypeDef::TABLE].len,
            tables[Param::TABLE].len,
            tables[InterfaceImpl::TABLE].len,
            tables[MemberRef::TABLE].len,
            unused_module.len,
            unused_property.len,
            unused_event.len,
            unused_standalone_sig.len,
            tables[ModuleRef::TABLE].len,
            tables[TypeSpec::TABLE].len,
            unused_assembly.len,
            unused_assembly_ref.len,
            unused_file.len,
            unused_exported_type.len,
            unused_manifest_resource.len,
            tables[GenericParam::TABLE].len,
            unused_generic_param_constraint.len,
            unused_method_spec.len,
        ]);

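        // Record every table's column widths: fixed-size fields use their literal byte width,
        // heap indexes use the sizes derived from `heap_sizes`, and table or coded indexes use
        // the widths computed above.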
        unused_assembly.set_columns(
            4,
            8,
            4,
            blob_index_size,
            string_index_size,
            string_index_size,
        );
        unused_assembly_os.set_columns(4, 4, 4, 0, 0, 0);
        unused_assembly_processor.set_columns(4, 0, 0, 0, 0, 0);
        unused_assembly_ref.set_columns(
            8,
            4,
            blob_index_size,
            string_index_size,
            string_index_size,
            blob_index_size,
        );
        unused_assembly_ref_os.set_columns(4, 4, 4, unused_assembly_ref.index_width(), 0, 0);
        unused_assembly_ref_processor.set_columns(4, unused_assembly_ref.index_width(), 0, 0, 0, 0);
        result.tables[ClassLayout::TABLE].set_columns(
            2,
            4,
            result.tables[TypeDef::TABLE].index_width(),
            0,
            0,
            0,
        );
        result.tables[Constant::TABLE].set_columns(2, has_constant, blob_index_size, 0, 0, 0);
        result.tables[Attribute::TABLE].set_columns(
            has_custom_attribute,
            custom_attribute_type,
            blob_index_size,
            0,
            0,
            0,
        );
        unused_decl_security.set_columns(2, has_decl_security, blob_index_size, 0, 0, 0);
        unused_event_map.set_columns(
            result.tables[TypeDef::TABLE].index_width(),
            unused_event.index_width(),
            0,
            0,
            0,
            0,
        );
        unused_event.set_columns(2, string_index_size, type_def_or_ref, 0, 0, 0);
        unused_exported_type.set_columns(
            4,
            4,
            string_index_size,
            string_index_size,
            implementation,
            0,
        );
        result.tables[Field::TABLE].set_columns(2, string_index_size, blob_index_size, 0, 0, 0);
        unused_field_layout.set_columns(4, result.tables[Field::TABLE].index_width(), 0, 0, 0, 0);
        unused_field_marshal.set_columns(has_field_marshal, blob_index_size, 0, 0, 0, 0);
        unused_field_rva.set_columns(4, result.tables[Field::TABLE].index_width(), 0, 0, 0, 0);
        unused_file.set_columns(4, string_index_size, blob_index_size, 0, 0, 0);
        result.tables[GenericParam::TABLE].set_columns(
            2,
            2,
            type_or_method_def,
            string_index_size,
            0,
            0,
        );
        unused_generic_param_constraint.set_columns(
            result.tables[GenericParam::TABLE].index_width(),
            type_def_or_ref,
            0,
            0,
            0,
            0,
        );
        result.tables[ImplMap::TABLE].set_columns(
            2,
            member_forwarded,
            string_index_size,
            result.tables[ModuleRef::TABLE].index_width(),
            0,
            0,
        );
        result.tables[InterfaceImpl::TABLE].set_columns(
            result.tables[TypeDef::TABLE].index_width(),
            type_def_or_ref,
            0,
            0,
            0,
            0,
        );
        unused_manifest_resource.set_columns(4, 4, string_index_size, implementation, 0, 0);
        result.tables[MemberRef::TABLE].set_columns(
            member_ref_parent,
            string_index_size,
            blob_index_size,
            0,
            0,
            0,
        );
        result.tables[MethodDef::TABLE].set_columns(
            4,
            2,
            2,
            string_index_size,
            blob_index_size,
            result.tables[Param::TABLE].index_width(),
        );
        unused_method_impl.set_columns(
            result.tables[TypeDef::TABLE].index_width(),
            method_def_or_ref,
            method_def_or_ref,
            0,
            0,
            0,
        );
        unused_method_semantics.set_columns(
            2,
            result.tables[MethodDef::TABLE].index_width(),
            has_semantics,
            0,
            0,
            0,
        );
        unused_method_spec.set_columns(method_def_or_ref, blob_index_size, 0, 0, 0, 0);
        unused_module.set_columns(
            2,
            string_index_size,
            guid_index_size,
            guid_index_size,
            guid_index_size,
            0,
        );
        result.tables[ModuleRef::TABLE].set_columns(string_index_size, 0, 0, 0, 0, 0);
        result.tables[NestedClass::TABLE].set_columns(
            result.tables[TypeDef::TABLE].index_width(),
            result.tables[TypeDef::TABLE].index_width(),
            0,
            0,
            0,
            0,
        );
        result.tables[Param::TABLE].set_columns(2, 2, string_index_size, 0, 0, 0);
        unused_property.set_columns(2, string_index_size, blob_index_size, 0, 0, 0);
        unused_property_map.set_columns(
            result.tables[TypeDef::TABLE].index_width(),
            unused_property.index_width(),
            0,
            0,
            0,
            0,
        );
        unused_standalone_sig.set_columns(blob_index_size, 0, 0, 0, 0, 0);
        result.tables[TypeDef::TABLE].set_columns(
            4,
            string_index_size,
            string_index_size,
            type_def_or_ref,
            result.tables[Field::TABLE].index_width(),
            result.tables[MethodDef::TABLE].index_width(),
        );
        result.tables[TypeRef::TABLE].set_columns(
            resolution_scope,
            string_index_size,
            string_index_size,
            0,
            0,
            0,
        );
        result.tables[TypeSpec::TABLE].set_columns(blob_index_size, 0, 0, 0, 0, 0);

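        // With every row width known, assign each table its byte offset: the table data is laid
        // out consecutively in the #~ stream in exactly this order.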
        unused_module.set_data(&mut view);
        result.tables[TypeRef::TABLE].set_data(&mut view);
        result.tables[TypeDef::TABLE].set_data(&mut view);
        result.tables[Field::TABLE].set_data(&mut view);
        result.tables[MethodDef::TABLE].set_data(&mut view);
        result.tables[Param::TABLE].set_data(&mut view);
        result.tables[InterfaceImpl::TABLE].set_data(&mut view);
        result.tables[MemberRef::TABLE].set_data(&mut view);
        result.tables[Constant::TABLE].set_data(&mut view);
        result.tables[Attribute::TABLE].set_data(&mut view);
        unused_field_marshal.set_data(&mut view);
        unused_decl_security.set_data(&mut view);
        result.tables[ClassLayout::TABLE].set_data(&mut view);
        unused_field_layout.set_data(&mut view);
        unused_standalone_sig.set_data(&mut view);
        unused_event_map.set_data(&mut view);
        unused_event.set_data(&mut view);
        unused_property_map.set_data(&mut view);
        unused_property.set_data(&mut view);
        unused_method_semantics.set_data(&mut view);
        unused_method_impl.set_data(&mut view);
        result.tables[ModuleRef::TABLE].set_data(&mut view);
        result.tables[TypeSpec::TABLE].set_data(&mut view);
        result.tables[ImplMap::TABLE].set_data(&mut view);
        unused_field_rva.set_data(&mut view);
        unused_assembly.set_data(&mut view);
        unused_assembly_processor.set_data(&mut view);
        unused_assembly_os.set_data(&mut view);
        unused_assembly_ref.set_data(&mut view);
        unused_assembly_ref_processor.set_data(&mut view);
        unused_assembly_ref_os.set_data(&mut view);
        unused_file.set_data(&mut view);
        unused_exported_type.set_data(&mut view);
        unused_manifest_resource.set_data(&mut view);
        result.tables[NestedClass::TABLE].set_data(&mut view);
        result.tables[GenericParam::TABLE].set_data(&mut view);

        Some(result)
    }

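    // Reads the value of one column of one row, widening it to `usize` whether the column is
    // stored as 1, 2, 4 or 8 bytes.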
    pub(crate) fn usize(&self, row: usize, table: usize, column: usize) -> usize {
        let table = &self.tables[table];
        let column = &table.columns[column];
        let offset = table.offset + row * table.width + column.offset;
        match column.width {
            1 => self.bytes.copy_as::<u8>(offset).map_or(0, |v| v as usize),
            2 => self.bytes.copy_as::<u16>(offset).map_or(0, |v| v as usize),
            4 => self.bytes.copy_as::<u32>(offset).map_or(0, |v| v as usize),
            _ => self.bytes.copy_as::<u64>(offset).map_or(0, |v| v as usize),
        }
    }

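    // Resolves a column value as an offset into the #Strings heap and returns the
    // null-terminated UTF-8 string stored there.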
    pub(crate) fn str(&'static self, row: usize, table: usize, column: usize) -> &'static str {
        let offset = self.strings + self.usize(row, table, column);
        let bytes = &self.bytes[offset..];
        let nul_pos = bytes
            .iter()
            .position(|&c| c == 0)
            .expect("expected null-terminated C-string");
        std::str::from_utf8(&bytes[..nul_pos]).expect("expected valid utf-8 C-string")
    }

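    // Resolves a column value as an offset into the #Blob heap. The blob's length is stored as
    // a compressed integer occupying 1, 2 or 4 bytes depending on the top bits of its first
    // byte.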
    pub(crate) fn blob(&'static self, row: usize, table: usize, column: usize) -> Blob {
        let offset = self.blobs + self.usize(row, table, column);
        let initial_byte = self.bytes[offset];

        let (blob_size, blob_size_bytes) = match initial_byte >> 5 {
            0..=3 => (initial_byte & 0x7f, 1),
            4..=5 => (initial_byte & 0x3f, 2),
            6 => (initial_byte & 0x1f, 4),
            rest => panic!("{rest:?}"),
        };

        let mut blob_size = blob_size as usize;

        for byte in &self.bytes[offset + 1..offset + blob_size_bytes] {
            blob_size = blob_size.checked_shl(8).unwrap_or(0) + (*byte as usize);
        }

        let offset = offset + blob_size_bytes;
        Blob::new(self, &self.bytes[offset..offset + blob_size])
    }

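    // A list column stores the 1-based index of the first target row belonging to this row; the
    // run ends where the next row's list begins, or at the end of the target table for the last
    // row.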
    pub(crate) fn list<R: AsRow>(
        &'static self,
        row: usize,
        table: usize,
        column: usize,
    ) -> RowIterator<R> {
        let first = self.usize(row, table, column) - 1;
        let next = row + 1;
        let last = if next < self.tables[table].len {
            self.usize(next, table, column) - 1
        } else {
            self.tables[R::TABLE].len
        };
        RowIterator::new(self, first..last)
    }

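    // Binary-searches a sorted column and returns the half-open range of rows whose value in
    // that column equals `value`.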
    pub(crate) fn equal_range<L: AsRow>(
        &'static self,
        column: usize,
        value: usize,
    ) -> RowIterator<L> {
        let mut first = 0;
        let mut last = self.tables[L::TABLE].len;
        let mut count = last;

        loop {
            if count == 0 {
                last = first;
                break;
            }

            let count2 = count / 2;
            let middle = first + count2;
            let middle_value = self.usize(middle, L::TABLE, column);

            match middle_value.cmp(&value) {
                Ordering::Less => {
                    first = middle + 1;
                    count -= count2 + 1;
                }
                Ordering::Greater => count = count2,
                Ordering::Equal => {
                    let first2 = self.lower_bound_of(L::TABLE, first, middle, column, value);
                    first += count;
                    last = self.upper_bound_of(L::TABLE, middle + 1, first, column, value);
                    first = first2;
                    break;
                }
            }
        }

        RowIterator::new(self, first..last)
    }

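    // Standard lower/upper bound searches over a sorted column, used by `equal_range` above.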
    fn lower_bound_of(
        &self,
        table: usize,
        mut first: usize,
        last: usize,
        column: usize,
        value: usize,
    ) -> usize {
        let mut count = last - first;
        while count > 0 {
            let count2 = count / 2;
            let middle = first + count2;
            if self.usize(middle, table, column) < value {
                first = middle + 1;
                count -= count2 + 1;
            } else {
                count = count2;
            }
        }
        first
    }

    fn upper_bound_of(
        &self,
        table: usize,
        mut first: usize,
        last: usize,
        column: usize,
        value: usize,
    ) -> usize {
        let mut count = last - first;
        while count > 0 {
            let count2 = count / 2;
            let middle = first + count2;
            if value < self.usize(middle, table, column) {
                count = count2
            } else {
                first = middle + 1;
                count -= count2 + 1;
            }
        }
        first
    }

    pub(crate) fn table<R: AsRow>(&'static self) -> RowIterator<R> {
        RowIterator::new(self, 0..self.tables[R::TABLE].len)
    }

    pub(crate) fn reader(&self) -> &'static Reader {
        // Safety: At this point the File is already pointing to a valid Reader.
        unsafe { &*self.reader }
    }
}

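// Finds the section whose virtual address range contains the given RVA, and translates an RVA
// into an offset within the file image.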
fn section_from_rva(sections: &[IMAGE_SECTION_HEADER], rva: u32) -> Option<&IMAGE_SECTION_HEADER> {
    sections.iter().find(|&s| {
        rva >= s.VirtualAddress && rva < s.VirtualAddress + unsafe { s.Misc.VirtualSize }
    })
}

fn offset_from_rva(section: &IMAGE_SECTION_HEADER, rva: u32) -> usize {
    (rva - section.VirtualAddress + section.PointerToRawData) as usize
}

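// Bounds- and alignment-checked helpers for reinterpreting the raw byte buffer as headers,
// slices and scalar values. Each returns `None` rather than reading out of range.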
trait View {
    fn view_as<T>(&self, offset: usize) -> Option<&T>;
    fn view_as_slice_of<T>(&self, offset: usize, len: usize) -> Option<&[T]>;
    fn copy_as<T: Copy>(&self, offset: usize) -> Option<T>;
    fn view_as_str(&self, offset: usize) -> Option<&[u8]>;
    fn is_proper_length<T>(&self, offset: usize) -> Option<()>;
    fn is_proper_length_and_alignment<T>(&self, offset: usize, count: usize) -> Option<*const T>;
}

impl View for [u8] {
    fn view_as<T>(&self, offset: usize) -> Option<&T> {
        unsafe { Some(&*self.is_proper_length_and_alignment(offset, 1)?) }
    }

    fn view_as_slice_of<T>(&self, offset: usize, len: usize) -> Option<&[T]> {
        unsafe {
            Some(std::slice::from_raw_parts(
                self.is_proper_length_and_alignment(offset, len)?,
                len,
            ))
        }
    }

    fn copy_as<T>(&self, offset: usize) -> Option<T> {
        self.is_proper_length::<T>(offset)?;

        unsafe {
            let mut data = std::mem::MaybeUninit::zeroed().assume_init();
            core::ptr::copy_nonoverlapping(
                self[offset..].as_ptr(),
                &mut data as *mut T as *mut u8,
                std::mem::size_of::<T>(),
            );
            Some(data)
        }
    }

    fn view_as_str(&self, offset: usize) -> Option<&[u8]> {
        let buffer = &self[offset..];
        let index = buffer.iter().position(|c| *c == b'\0')?;
        Some(&self[offset..offset + index])
    }

    fn is_proper_length<T>(&self, offset: usize) -> Option<()> {
        if offset + std::mem::size_of::<T>() <= self.len() {
            Some(())
        } else {
            None
        }
    }

    fn is_proper_length_and_alignment<T>(&self, offset: usize, count: usize) -> Option<*const T> {
        self.is_proper_length::<T>(offset * count)?;
        let ptr = &self[offset] as *const u8 as *const T;

        if ptr.align_offset(std::mem::align_of::<T>()) == 0 {
            Some(ptr)
        } else {
            None
        }
    }
}

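// The shape of one metadata table: its byte offset within the file, its row count, the size of
// one row in bytes, and the offset and width of up to six columns.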
#[derive(Default)]
struct Table {
    offset: usize,
    len: usize,
    width: usize,
    columns: [Column; 6],
}

impl Table {
    fn index_width(&self) -> usize {
        if self.len < (1 << 16) {
            2
        } else {
            4
        }
    }

    fn set_columns(&mut self, a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) {
        self.width = a + b + c + d + e + f;
        self.columns[0] = Column::new(0, a);
        if b != 0 {
            self.columns[1] = Column::new(a, b);
        }
        if c != 0 {
            self.columns[2] = Column::new(a + b, c);
        }
        if d != 0 {
            self.columns[3] = Column::new(a + b + c, d);
        }
        if e != 0 {
            self.columns[4] = Column::new(a + b + c + d, e);
        }
        if f != 0 {
            self.columns[5] = Column::new(a + b + c + d + e, f);
        }
    }

    fn set_data(&mut self, offset: &mut usize) {
        if self.len != 0 {
            let next = *offset + self.len * self.width;
            self.offset = *offset;
            *offset = next;
        }
    }
}

#[derive(Default)]
struct Column {
    offset: usize,
    width: usize,
}

impl Column {
    fn new(offset: usize, width: usize) -> Self {
        Self { offset, width }
    }
}

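// The metadata root header ("BSJB" signature). The version string makes it variable-length,
// which is why `File::new` computes the offsets past it by hand rather than reading them
// through this struct.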
#[repr(C)]
#[derive(Default)]
struct METADATA_HEADER {
    signature: u32,
    major_version: u16,
    minor_version: u16,
    reserved: u32,
    length: u32,
    version: [u8; 20],
    flags: u16,
    streams: u16,
}

const METADATA_SIGNATURE: u32 = 0x424A_5342;

// A coded index (see codes.rs) is a table index that may refer to different tables. The size of the column in memory
// must therefore be large enough to hold an index for a row in the largest possible table. This function determines
// this size for the given winmd file.
fn coded_index_size(tables: &[usize]) -> usize {
    fn small(row_count: usize, bits: u8) -> bool {
        (row_count as u64) < (1u64 << (16 - bits))
    }

    fn bits_needed(value: usize) -> u8 {
        let mut value = value - 1;
        let mut bits: u8 = 1;
        while {
            value >>= 1;
            value != 0
        } {
            bits += 1;
        }
        bits
    }

    let bits_needed = bits_needed(tables.len());

    if tables.iter().all(|table| small(*table, bits_needed)) {
        2
    } else {
        4
    }
}