1#![cfg_attr(test, allow(dead_code))]
2
3pub use self::imp::{cleanup, init};
4use self::imp::{drop_handler, make_handler};
5
// Owns (a pointer to) an alternate-signal-stack mapping, or null when no
// stack was installed by `make_handler`. Dropping a `Handler` tears the
// mapping down again via `drop_handler`.
pub struct Handler {
    data: *mut libc::c_void,
}
9
10impl Handler {
11 pub unsafe fn new() -> Handler {
12 make_handler(main_thread:false)
13 }
14
15 fn null() -> Handler {
16 Handler { data: crate::ptr::null_mut() }
17 }
18}
19
impl Drop for Handler {
    fn drop(&mut self) {
        // SAFETY: `data` is either null (in which case `drop_handler` is a
        // no-op) or the altstack pointer handed out by `make_handler`, which
        // is exactly what `drop_handler` expects.
        unsafe {
            drop_handler(self.data);
        }
    }
}
27
28#[cfg(any(
29 target_os = "linux",
30 target_os = "freebsd",
31 target_os = "hurd",
32 target_os = "macos",
33 target_os = "netbsd",
34 target_os = "openbsd",
35 target_os = "solaris",
36 target_os = "illumos",
37))]
38mod imp {
39 use libc::{
40 MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
41 SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
42 };
43 #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
44 use libc::{mmap as mmap64, mprotect, munmap};
45 #[cfg(all(target_os = "linux", target_env = "gnu"))]
46 use libc::{mmap64, mprotect, munmap};
47
48 use super::Handler;
49 use crate::cell::Cell;
50 use crate::ops::Range;
51 use crate::sync::OnceLock;
52 use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
53 use crate::sys::pal::unix::os;
54 use crate::{io, mem, ptr, thread};
55
    // We use a TLS variable to store the address of the guard page. While TLS
    // variables are not guaranteed to be signal-safe, this works out in practice
    // since we make sure to write to the variable before the signal stack is
    // installed, thereby ensuring that the variable is always allocated when
    // the signal handler is called.
    thread_local! {
        // FIXME: use `Range` once that implements `Copy`.
        // Stored as the (start, end) addresses of this thread's guard range;
        // (0, 0) means no guard range is known for this thread.
        static GUARD: Cell<(usize, usize)> = const { Cell::new((0, 0)) };
    }
65
66 // Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
67 // (unmapped pages) at the end of every thread's stack, so if a thread ends
68 // up running into the guard page it'll trigger this handler. We want to
69 // detect these cases and print out a helpful error saying that the stack
70 // has overflowed. All other signals, however, should go back to what they
71 // were originally supposed to do.
72 //
73 // This handler currently exists purely to print an informative message
74 // whenever a thread overflows its stack. We then abort to exit and
75 // indicate a crash, but to avoid a misleading SIGSEGV that might lead
76 // users to believe that unsafe code has accessed an invalid pointer; the
77 // SIGSEGV encountered when overflowing the stack is expected and
78 // well-defined.
79 //
80 // If this is not a stack overflow, the handler un-registers itself and
81 // then returns (to allow the original signal to be delivered again).
82 // Returning from this kind of signal handler is technically not defined
83 // to work when reading the POSIX spec strictly, but in practice it turns
84 // out many large systems and all implementations allow returning from a
85 // signal handler to work. For a more detailed explanation see the
86 // comments on #26458.
    /// SIGSEGV/SIGBUS entry point
    /// # Safety
    /// Rust doesn't call this, it *gets called*.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe extern "C" fn signal_handler(
        signum: libc::c_int,
        info: *mut libc::siginfo_t,
        _data: *mut libc::c_void,
    ) {
        // Read the guard range recorded for this thread by `init` /
        // `make_handler`; (0, 0) means no guard range is known, so the
        // range check below can never match.
        let (start, end) = GUARD.get();
        // SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
        let addr = unsafe { (*info).si_addr().addr() };

        // If the faulting address is within the guard page, then we print a
        // message saying so and abort.
        if start <= addr && addr < end {
            thread::with_current_name(|name| {
                let name = name.unwrap_or("<unknown>");
                rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
            });

            rtabort!("stack overflow");
        } else {
            // Unregister ourselves by reverting back to the default behavior.
            // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
            let mut action: sigaction = unsafe { mem::zeroed() };
            action.sa_sigaction = SIG_DFL;
            // SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
            unsafe { sigaction(signum, &action, ptr::null_mut()) };

            // See comment above for why this function returns.
        }
    }
120
    // Cached result of `os::page_size()`, written once in `init` so that
    // signal-safe code never has to query the OS itself.
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
    // The altstack installed for the main thread, kept so `cleanup` can free it.
    static MAIN_ALTSTACK: AtomicPtr<libc::c_void> = AtomicPtr::new(ptr::null_mut());
    // Whether our signal handler was installed, i.e. whether new threads need
    // an alternate signal stack of their own (checked in `make_handler`).
    static NEED_ALTSTACK: AtomicBool = AtomicBool::new(false);
124
    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn init() {
        // Cache the page size first: everything below (and the signal handler
        // paths) reads it from this atomic rather than asking the OS.
        PAGE_SIZE.store(os::page_size(), Ordering::Relaxed);

        // Always write to GUARD to ensure the TLS variable is allocated.
        let guard = unsafe { install_main_guard().unwrap_or(0..0) };
        GUARD.set((guard.start, guard.end));

        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        for &signal in &[SIGSEGV, SIGBUS] {
            // SAFETY: just fetches the current signal handler into action
            unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
            // Configure our signal handler if one is not already set.
            if action.sa_sigaction == SIG_DFL {
                if !NEED_ALTSTACK.load(Ordering::Relaxed) {
                    // haven't set up our sigaltstack yet
                    NEED_ALTSTACK.store(true, Ordering::Release);
                    let handler = unsafe { make_handler(true) };
                    MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
                    // Keep the main thread's altstack alive for the program's
                    // lifetime; `cleanup` frees it through MAIN_ALTSTACK.
                    mem::forget(handler);
                }
                action.sa_flags = SA_SIGINFO | SA_ONSTACK;
                action.sa_sigaction = signal_handler as sighandler_t;
                // SAFETY: only overriding signals if the default is set
                unsafe { sigaction(signal, &action, ptr::null_mut()) };
            }
        }
    }
156
157 /// # Safety
158 /// Must be called only once
159 #[forbid(unsafe_op_in_unsafe_fn)]
160 pub unsafe fn cleanup() {
161 // FIXME: I probably cause more bugs than I'm worth!
162 // see https://github.com/rust-lang/rust/issues/111272
163 unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
164 }
165
166 unsafe fn get_stack() -> libc::stack_t {
167 // OpenBSD requires this flag for stack mapping
168 // otherwise the said mapping will fail as a no-op on most systems
169 // and has a different meaning on FreeBSD
170 #[cfg(any(
171 target_os = "openbsd",
172 target_os = "netbsd",
173 target_os = "linux",
174 target_os = "dragonfly",
175 ))]
176 let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
177 #[cfg(not(any(
178 target_os = "openbsd",
179 target_os = "netbsd",
180 target_os = "linux",
181 target_os = "dragonfly",
182 )))]
183 let flags = MAP_PRIVATE | MAP_ANON;
184
185 let sigstack_size = sigstack_size();
186 let page_size = PAGE_SIZE.load(Ordering::Relaxed);
187
188 let stackp = mmap64(
189 ptr::null_mut(),
190 sigstack_size + page_size,
191 PROT_READ | PROT_WRITE,
192 flags,
193 -1,
194 0,
195 );
196 if stackp == MAP_FAILED {
197 panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
198 }
199 let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
200 if guard_result != 0 {
201 panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
202 }
203 let stackp = stackp.add(page_size);
204
205 libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
206 }
207
    /// # Safety
    /// Mutates the alternate signal stack
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn make_handler(main_thread: bool) -> Handler {
        // If `init` never installed our signal handler, there is nothing for
        // an altstack to serve; hand back an empty handler.
        if !NEED_ALTSTACK.load(Ordering::Acquire) {
            return Handler::null();
        }

        if !main_thread {
            // Always write to GUARD to ensure the TLS variable is allocated.
            // (For the main thread, `init` already did this.)
            let guard = unsafe { current_guard() }.unwrap_or(0..0);
            GUARD.set((guard.start, guard.end));
        }

        // SAFETY: assuming stack_t is zero-initializable
        let mut stack = unsafe { mem::zeroed() };
        // SAFETY: reads current stack_t into stack
        unsafe { sigaltstack(ptr::null(), &mut stack) };
        // Configure alternate signal stack, if one is not already set.
        if stack.ss_flags & SS_DISABLE != 0 {
            // SAFETY: We warned our caller this would happen!
            unsafe {
                stack = get_stack();
                sigaltstack(&stack, ptr::null_mut());
            }
            // The returned handler owns the mapping and frees it on drop.
            Handler { data: stack.ss_sp as *mut libc::c_void }
        } else {
            // An altstack was already installed by someone else; we own nothing.
            Handler::null()
        }
    }
238
    /// # Safety
    /// Must be called
    /// - only with our handler or nullptr
    /// - only when done with our altstack
    /// This disables the alternate signal stack!
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn drop_handler(data: *mut libc::c_void) {
        // Null means `Handler::null()` — nothing was ever mapped.
        if !data.is_null() {
            let sigstack_size = sigstack_size();
            let page_size = PAGE_SIZE.load(Ordering::Relaxed);
            let disabling_stack = libc::stack_t {
                ss_sp: ptr::null_mut(),
                ss_flags: SS_DISABLE,
                // Workaround for bug in macOS implementation of sigaltstack
                // UNIX2003 which returns ENOMEM when disabling a stack while
                // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
                // both ss_sp and ss_size should be ignored in this case.
                ss_size: sigstack_size,
            };
            // SAFETY: we warned the caller this disables the alternate signal stack!
            unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
            // SAFETY: We know from `get_stackp` that the alternate stack we installed is part of
            // a mapping that started one page earlier, so walk back a page and unmap from there.
            unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
        }
    }
265
266 /// Modern kernels on modern hardware can have dynamic signal stack sizes.
267 #[cfg(any(target_os = "linux", target_os = "android"))]
268 fn sigstack_size() -> usize {
269 let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
270 // If getauxval couldn't find the entry, it returns 0,
271 // so take the higher of the "constant" and auxval.
272 // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
273 libc::SIGSTKSZ.max(dynamic_sigstksz as _)
274 }
275
    /// Not all OS support hardware where this is needed.
    // On these targets the static constant is the only (and sufficient) answer.
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    fn sigstack_size() -> usize {
        libc::SIGSTKSZ
    }
281
    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        // stack_getbounds fills in the calling thread's stack bounds; we
        // return ss_sp unchanged (no further adjustment is applied here).
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
        Some(current_stack.ss_sp)
    }
288
    #[cfg(target_os = "macos")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        // pthread_get_stackaddr_np reports one end of the stack; subtracting
        // the stack size yields the lowest (start) address that we return.
        let th = libc::pthread_self();
        let stackptr = libc::pthread_get_stackaddr_np(th);
        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
    }
295
    #[cfg(target_os = "openbsd")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);

        // `ss_sp` here is treated as the high end of the segment (the code
        // subtracts `ss_size` from it to obtain the start address).
        let stack_ptr = current_stack.ss_sp;
        let stackaddr = if libc::pthread_main_np() == 1 {
            // main thread: skip one page (the main thread's guard page is
            // accounted inside the reported segment)
            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
        } else {
            // new thread
            stack_ptr.addr() - current_stack.ss_size
        };
        Some(stack_ptr.with_addr(stackaddr))
    }
311
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "l4re"
    ))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut ret = None;
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        // On non-FreeBSD targets the attr is zeroed instead of going through
        // pthread_attr_init (see the FreeBSD-only init/destroy calls below).
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        // FreeBSD spells the "get attributes of a live thread" API differently.
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut stackaddr = crate::ptr::null_mut();
            let mut stacksize = 0;
            assert_eq!(
                libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr, &mut stacksize),
                0
            );
            ret = Some(stackaddr);
        }
        // FreeBSD always ran pthread_attr_init, so always destroy there;
        // elsewhere only destroy if the getattr call actually succeeded.
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
346
347 fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
348 let stackptr = unsafe { get_stack_start()? };
349 let stackaddr = stackptr.addr();
350
351 // Ensure stackaddr is page aligned! A parent process might
352 // have reset RLIMIT_STACK to be non-page aligned. The
353 // pthread_attr_getstack() reports the usable stack area
354 // stackaddr < stackaddr + stacksize, so if stackaddr is not
355 // page-aligned, calculate the fix such that stackaddr <
356 // new_page_aligned_stackaddr < stackaddr + stacksize
357 let remainder = stackaddr % page_size;
358 Some(if remainder == 0 {
359 stackptr
360 } else {
361 stackptr.with_addr(stackaddr + page_size - remainder)
362 })
363 }
364
    // Dispatches to the OS-specific main-thread guard installation and returns
    // the address range our signal handler should treat as "stack overflow",
    // or `None` when no such range can be determined.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard() -> Option<Range<usize>> {
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        unsafe {
            // this way someone on any unix-y OS can check that all these compile
            if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
                install_main_guard_linux(page_size)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                install_main_guard_linux_musl(page_size)
            } else if cfg!(target_os = "freebsd") {
                install_main_guard_freebsd(page_size)
            } else if cfg!(any(target_os = "netbsd", target_os = "openbsd")) {
                install_main_guard_bsds(page_size)
            } else {
                install_main_guard_default(page_size)
            }
        }
    }
384
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
        // Linux doesn't allocate the whole stack right away, and
        // the kernel has its own stack-guard mechanism to fault
        // when growing too close to an existing mapping. If we map
        // our own guard, then the kernel starts enforcing a rather
        // large gap above that, rendering much of the possible
        // stack space useless. See #43052.
        //
        // Instead, we'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        // Report the one page *below* the usable stack as the guard range.
        Some(stackaddr - page_size..stackaddr)
    }
401
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
        // For the main thread, the musl's pthread_attr_getstack
        // returns the current stack size, rather than maximum size
        // it can eventually grow to. It cannot be used to determine
        // the position of kernel's stack guard.
        // Hence no guard range can be reported on musl's main thread.
        None
    }
410
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
        // FreeBSD's stack autogrows, and optionally includes a guard page
        // at the bottom. If we try to remap the bottom of the stack
        // ourselves, FreeBSD's guard page moves upwards. So we'll just use
        // the builtin guard page.
        let stackptr = stack_start_aligned(page_size)?;
        let guardaddr = stackptr.addr();
        // Technically the number of guard pages is tunable and controlled
        // by the security.bsd.stack_guard_page sysctl.
        // By default it is 1, checking once is enough since it is
        // a boot time config value.
        static PAGES: OnceLock<usize> = OnceLock::new();

        let pages = PAGES.get_or_init(|| {
            use crate::sys::weak::dlsym;
            dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int);
            let mut guard: usize = 0;
            let mut size = size_of_val(&guard);
            let oid = c"security.bsd.stack_guard_page";
            // Fall back to the default of 1 guard page whenever the weak
            // symbol is unavailable or the sysctl read fails.
            match sysctlbyname.get() {
                Some(fcn) if unsafe {
                    fcn(oid.as_ptr(),
                        (&raw mut guard).cast(),
                        &raw mut size,
                        ptr::null_mut(),
                        0) == 0
                } => guard,
                _ => 1,
            }
        });
        Some(guardaddr..guardaddr + pages * page_size)
    }
444
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
        // OpenBSD stack already includes a guard page, and stack is
        // immutable.
        // NetBSD stack includes the guard page.
        //
        // We'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        // Report the one page *below* the usable stack as the guard range.
        Some(stackaddr - page_size..stackaddr)
    }
458
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
        // Reallocate the last page of the stack.
        // This ensures SIGBUS will be raised on
        // stack overflow.
        // Systems which enforce strict PAX MPROTECT do not allow
        // to mprotect() a mapping with less restrictive permissions
        // than the initial mmap() used, so we mmap() here with
        // read/write permissions and only then mprotect() it to
        // no permissions at all. See issue #50313.
        let stackptr = stack_start_aligned(page_size)?;
        // SAFETY: remaps exactly one page at the aligned stack start (MAP_FIXED).
        let result = unsafe {
            mmap64(
                stackptr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                -1,
                0,
            )
        };
        if result != stackptr || result == MAP_FAILED {
            panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
        }

        // Now revoke all access so any touch of this page faults.
        let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
        if result != 0 {
            panic!("failed to protect the guard page: {}", io::Error::last_os_error());
        }

        let guardaddr = stackptr.addr();

        Some(guardaddr..guardaddr + page_size)
    }
493
    #[cfg(any(
        target_os = "macos",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        // On these targets the guard is assumed to be the single page
        // directly below the reported stack start.
        let stackptr = get_stack_start()?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
    }
506
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let mut ret = None;

        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        // Non-FreeBSD targets zero the attr instead of pthread_attr_init
        // (mirroring `get_stack_start` above).
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut guardsize = 0;
            assert_eq!(libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), 0);
            if guardsize == 0 {
                if cfg!(all(target_os = "linux", target_env = "musl")) {
                    // musl versions before 1.1.19 always reported guard
                    // size obtained from pthread_attr_get_np as zero.
                    // Use page size as a fallback.
                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
                } else {
                    panic!("there is no guard page");
                }
            }
            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
            let mut size = 0;
            assert_eq!(libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size), 0);

            // Where the guard sits relative to the reported stack base
            // differs per platform/libc; pick the range accordingly.
            let stackaddr = stackptr.addr();
            ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
            {
                // glibc used to include the guard area within the stack, as noted in the BUGS
                // section of `man pthread_attr_getguardsize`. This has been corrected starting
                // with glibc 2.27, and in some distro backports, so the guard is now placed at the
                // end (below) the stack. There's no easy way for us to know which we have at
                // runtime, so we'll just match any fault in the range right above or below the
                // stack base to call that fault a stack overflow.
                Some(stackaddr - guardsize..stackaddr + guardsize)
            } else {
                Some(stackaddr..stackaddr + guardsize)
            };
        }
        // FreeBSD always ran pthread_attr_init, so always destroy there.
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
569}
570
571// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
572// several symbols that might lead to rejections from the App Store, namely
573// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
574//
575// This might be overly cautious, though it is also what Swift does (and they
576// usually have fewer qualms about forwards compatibility, since the runtime
577// is shipped with the OS):
578// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
579#[cfg(not(any(
580 target_os = "linux",
581 target_os = "freebsd",
582 target_os = "hurd",
583 target_os = "macos",
584 target_os = "netbsd",
585 target_os = "openbsd",
586 target_os = "solaris",
587 target_os = "illumos",
588 target_os = "cygwin",
589)))]
mod imp {
    // No-op fallback for targets not covered by the cfg lists above:
    // no signal handler or altstack is ever installed, so `Handler`s are
    // always null and init/cleanup/drop_handler do nothing.
    pub unsafe fn init() {}

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(_main_thread: bool) -> super::Handler {
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}
601
602#[cfg(target_os = "cygwin")]
603mod imp {
604 mod c {
605 pub type PVECTORED_EXCEPTION_HANDLER =
606 Option<unsafe extern "system" fn(exceptioninfo: *mut EXCEPTION_POINTERS) -> i32>;
607 pub type NTSTATUS = i32;
608 pub type BOOL = i32;
609
610 unsafe extern "system" {
611 pub fn AddVectoredExceptionHandler(
612 first: u32,
613 handler: PVECTORED_EXCEPTION_HANDLER,
614 ) -> *mut core::ffi::c_void;
615 pub fn SetThreadStackGuarantee(stacksizeinbytes: *mut u32) -> BOOL;
616 }
617
618 pub const EXCEPTION_STACK_OVERFLOW: NTSTATUS = 0xC00000FD_u32 as _;
619 pub const EXCEPTION_CONTINUE_SEARCH: i32 = 1i32;
620
621 #[repr(C)]
622 #[derive(Clone, Copy)]
623 pub struct EXCEPTION_POINTERS {
624 pub ExceptionRecord: *mut EXCEPTION_RECORD,
625 // We don't need this field here
626 // pub Context: *mut CONTEXT,
627 }
628 #[repr(C)]
629 #[derive(Clone, Copy)]
630 pub struct EXCEPTION_RECORD {
631 pub ExceptionCode: NTSTATUS,
632 pub ExceptionFlags: u32,
633 pub ExceptionRecord: *mut EXCEPTION_RECORD,
634 pub ExceptionAddress: *mut core::ffi::c_void,
635 pub NumberParameters: u32,
636 pub ExceptionInformation: [usize; 15],
637 }
638 }
639
640 /// Reserve stack space for use in stack overflow exceptions.
641 fn reserve_stack() {
642 let result = unsafe { c::SetThreadStackGuarantee(&mut 0x5000) };
643 // Reserving stack space is not critical so we allow it to fail in the released build of libstd.
644 // We still use debug assert here so that CI will test that we haven't made a mistake calling the function.
645 debug_assert_ne!(result, 0, "failed to reserve stack space for exception handling");
646 }
647
648 unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> i32 {
649 // SAFETY: It's up to the caller (which in this case is the OS) to ensure that `ExceptionInfo` is valid.
650 unsafe {
651 let rec = &(*(*ExceptionInfo).ExceptionRecord);
652 let code = rec.ExceptionCode;
653
654 if code == c::EXCEPTION_STACK_OVERFLOW {
655 crate::thread::with_current_name(|name| {
656 let name = name.unwrap_or("<unknown>");
657 rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
658 });
659 }
660 c::EXCEPTION_CONTINUE_SEARCH
661 }
662 }
663
664 pub unsafe fn init() {
665 // SAFETY: `vectored_handler` has the correct ABI and is safe to call during exception handling.
666 unsafe {
667 let result = c::AddVectoredExceptionHandler(0, Some(vectored_handler));
668 // Similar to the above, adding the stack overflow handler is allowed to fail
669 // but a debug assert is used so CI will still test that it normally works.
670 debug_assert!(!result.is_null(), "failed to install exception handler");
671 }
672 // Set the thread stack guarantee for the main thread.
673 reserve_stack();
674 }
675
676 pub unsafe fn cleanup() {}
677
678 pub unsafe fn make_handler(main_thread: bool) -> super::Handler {
679 if !main_thread {
680 reserve_stack();
681 }
682 super::Handler::null()
683 }
684
685 pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
686}
687
