1#![cfg_attr(test, allow(dead_code))]
2
3pub use self::imp::{cleanup, init};
4use self::imp::{drop_handler, make_handler};
5
/// Guard object for a per-thread alternate signal stack.
///
/// Created by `imp::make_handler`; dropping it calls `imp::drop_handler`,
/// which tears the alternate stack down again.
pub struct Handler {
    // Start of the usable alternate-stack mapping, or null when no
    // per-thread stack was installed (see `Handler::null`).
    data: *mut libc::c_void,
}
9
10impl Handler {
11 pub unsafe fn new() -> Handler {
12 make_handler(_main_thread:false)
13 }
14
15 fn null() -> Handler {
16 Handler { data: crate::ptr::null_mut() }
17 }
18}
19
20impl Drop for Handler {
21 fn drop(&mut self) {
22 unsafe {
23 drop_handler(self.data);
24 }
25 }
26}
27
28#[cfg(all(
29 not(miri),
30 any(
31 target_os = "linux",
32 target_os = "freebsd",
33 target_os = "hurd",
34 target_os = "macos",
35 target_os = "netbsd",
36 target_os = "openbsd",
37 target_os = "solaris",
38 target_os = "illumos",
39 ),
40))]
41mod thread_info;
42
43// miri doesn't model signals nor stack overflows and this code has some
44// synchronization properties that we don't want to expose to user code,
45// hence we disable it on miri.
46#[cfg(all(
47 not(miri),
48 any(
49 target_os = "linux",
50 target_os = "freebsd",
51 target_os = "hurd",
52 target_os = "macos",
53 target_os = "netbsd",
54 target_os = "openbsd",
55 target_os = "solaris",
56 target_os = "illumos",
57 )
58))]
59mod imp {
60 use libc::{
61 MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
62 SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
63 };
64 #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
65 use libc::{mmap as mmap64, mprotect, munmap};
66 #[cfg(all(target_os = "linux", target_env = "gnu"))]
67 use libc::{mmap64, mprotect, munmap};
68
69 use super::Handler;
70 use super::thread_info::{delete_current_info, set_current_info, with_current_info};
71 use crate::ops::Range;
72 use crate::sync::OnceLock;
73 use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
74 use crate::sys::pal::unix::os;
75 use crate::thread::with_current_name;
76 use crate::{io, mem, panic, ptr};
77
78 // Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
79 // (unmapped pages) at the end of every thread's stack, so if a thread ends
80 // up running into the guard page it'll trigger this handler. We want to
81 // detect these cases and print out a helpful error saying that the stack
82 // has overflowed. All other signals, however, should go back to what they
83 // were originally supposed to do.
84 //
85 // This handler currently exists purely to print an informative message
86 // whenever a thread overflows its stack. We then abort to exit and
87 // indicate a crash, but to avoid a misleading SIGSEGV that might lead
88 // users to believe that unsafe code has accessed an invalid pointer; the
89 // SIGSEGV encountered when overflowing the stack is expected and
90 // well-defined.
91 //
92 // If this is not a stack overflow, the handler un-registers itself and
93 // then returns (to allow the original signal to be delivered again).
94 // Returning from this kind of signal handler is technically not defined
95 // to work when reading the POSIX spec strictly, but in practice it turns
96 // out many large systems and all implementations allow returning from a
97 // signal handler to work. For a more detailed explanation see the
98 // comments on #26458.
    /// SIGSEGV/SIGBUS entry point
    ///
    /// Installed by `init` with `SA_SIGINFO | SA_ONSTACK`, so it runs on the
    /// alternate stack configured by `make_handler` where one exists.
    ///
    /// # Safety
    /// Rust doesn't call this, it *gets called*.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe extern "C" fn signal_handler(
        signum: libc::c_int,
        info: *mut libc::siginfo_t,
        _data: *mut libc::c_void,
    ) {
        // SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
        let fault_addr = unsafe { (*info).si_addr().addr() };

        // `with_current_info` expects that the process aborts after it is
        // called. If the signal was not caused by a memory access, this might
        // not be true. We detect this by noticing that the `si_addr` field is
        // zero if the signal is synthetic.
        if fault_addr != 0 {
            with_current_info(|thread_info| {
                // If the faulting address is within the guard page, then we print a
                // message saying so and abort.
                if let Some(thread_info) = thread_info
                    && thread_info.guard_page_range.contains(&fault_addr)
                {
                    let name = thread_info.thread_name.as_deref().unwrap_or("<unknown>");
                    rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
                    rtabort!("stack overflow");
                }
            })
        }

        // Not a stack overflow (or no per-thread info was recorded):
        // unregister ourselves by reverting back to the default behavior,
        // then return so the fault re-triggers with the default disposition.
        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        action.sa_sigaction = SIG_DFL;
        // SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
        unsafe { sigaction(signum, &action, ptr::null_mut()) };

        // See comment above for why this function returns.
    }
138
    // System page size, cached by `init` (reads before `init` observe 0).
    static PAGE_SIZE: Atomic<usize> = AtomicUsize::new(0);
    // The main thread's alternate-stack pointer, stashed by `init` (which
    // `mem::forget`s its `Handler`) so `cleanup` can still free it.
    static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut());
    // Set by `init` once our signal handler is installed; `make_handler`
    // checks it to decide whether a per-thread altstack is needed at all.
    static NEED_ALTSTACK: Atomic<bool> = AtomicBool::new(false);
142
    /// Installs the SIGSEGV/SIGBUS stack-overflow handler for the process
    /// and sets up the main thread's alternate stack and guard-page info.
    ///
    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn init() {
        PAGE_SIZE.store(os::page_size(), Ordering::Relaxed);

        let mut guard_page_range = unsafe { install_main_guard() };

        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        for &signal in &[SIGSEGV, SIGBUS] {
            // SAFETY: just fetches the current signal handler into action
            unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
            // Configure our signal handler if one is not already set.
            if action.sa_sigaction == SIG_DFL {
                if !NEED_ALTSTACK.load(Ordering::Relaxed) {
                    // haven't set up our sigaltstack yet
                    // Release pairs with the Acquire load in `make_handler`.
                    NEED_ALTSTACK.store(true, Ordering::Release);
                    let handler = unsafe { make_handler(true) };
                    MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
                    // Keep the main thread's altstack alive for the process
                    // lifetime; `cleanup` frees it via MAIN_ALTSTACK.
                    mem::forget(handler);

                    if let Some(guard_page_range) = guard_page_range.take() {
                        let thread_name = with_current_name(|name| name.map(Box::from));
                        set_current_info(guard_page_range, thread_name);
                    }
                }

                action.sa_flags = SA_SIGINFO | SA_ONSTACK;
                action.sa_sigaction = signal_handler as sighandler_t;
                // SAFETY: only overriding signals if the default is set
                unsafe { sigaction(signal, &action, ptr::null_mut()) };
            }
        }
    }
178
    /// Frees the main thread's alternate signal stack (stored by `init`).
    ///
    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn cleanup() {
        // FIXME: I probably cause more bugs than I'm worth!
        // see https://github.com/rust-lang/rust/issues/111272
        unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
    }
187
188 unsafe fn get_stack() -> libc::stack_t {
189 // OpenBSD requires this flag for stack mapping
190 // otherwise the said mapping will fail as a no-op on most systems
191 // and has a different meaning on FreeBSD
192 #[cfg(any(
193 target_os = "openbsd",
194 target_os = "netbsd",
195 target_os = "linux",
196 target_os = "dragonfly",
197 ))]
198 let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
199 #[cfg(not(any(
200 target_os = "openbsd",
201 target_os = "netbsd",
202 target_os = "linux",
203 target_os = "dragonfly",
204 )))]
205 let flags = MAP_PRIVATE | MAP_ANON;
206
207 let sigstack_size = sigstack_size();
208 let page_size = PAGE_SIZE.load(Ordering::Relaxed);
209
210 let stackp = mmap64(
211 ptr::null_mut(),
212 sigstack_size + page_size,
213 PROT_READ | PROT_WRITE,
214 flags,
215 -1,
216 0,
217 );
218 if stackp == MAP_FAILED {
219 panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
220 }
221 let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
222 if guard_result != 0 {
223 panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
224 }
225 let stackp = stackp.add(page_size);
226
227 libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
228 }
229
    /// Sets up stack-overflow reporting for the current thread: records its
    /// guard-page info (non-main threads only) and installs an alternate
    /// signal stack unless one is already active.
    ///
    /// # Safety
    /// Mutates the alternate signal stack
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn make_handler(main_thread: bool) -> Handler {
        // Acquire pairs with the Release store in `init`: observing `true`
        // means the signal-handler installation is visible too. If `init`
        // never installed our handler, there is nothing to do.
        if !NEED_ALTSTACK.load(Ordering::Acquire) {
            return Handler::null();
        }

        if !main_thread {
            // Record this thread's guard range and name so `signal_handler`
            // can report overflows for it. The main thread's info is
            // recorded by `init` instead.
            if let Some(guard_page_range) = unsafe { current_guard() } {
                let thread_name = with_current_name(|name| name.map(Box::from));
                set_current_info(guard_page_range, thread_name);
            }
        }

        // SAFETY: assuming stack_t is zero-initializable
        let mut stack = unsafe { mem::zeroed() };
        // SAFETY: reads current stack_t into stack
        unsafe { sigaltstack(ptr::null(), &mut stack) };
        // Configure alternate signal stack, if one is not already set.
        if stack.ss_flags & SS_DISABLE != 0 {
            // SAFETY: We warned our caller this would happen!
            unsafe {
                stack = get_stack();
                sigaltstack(&stack, ptr::null_mut());
            }
            Handler { data: stack.ss_sp as *mut libc::c_void }
        } else {
            // Someone else already installed an altstack; don't own it.
            Handler::null()
        }
    }
261
    /// # Safety
    /// Must be called
    /// - only with our handler or nullptr
    /// - only when done with our altstack
    /// This disables the alternate signal stack!
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn drop_handler(data: *mut libc::c_void) {
        if !data.is_null() {
            let sigstack_size = sigstack_size();
            let page_size = PAGE_SIZE.load(Ordering::Relaxed);
            let disabling_stack = libc::stack_t {
                ss_sp: ptr::null_mut(),
                ss_flags: SS_DISABLE,
                // Workaround for bug in macOS implementation of sigaltstack
                // UNIX2003 which returns ENOMEM when disabling a stack while
                // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
                // both ss_sp and ss_size should be ignored in this case.
                ss_size: sigstack_size,
            };
            // SAFETY: we warned the caller this disables the alternate signal stack!
            unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
            // SAFETY: We know from `get_stack` that the alternate stack we installed is part of
            // a mapping that started one page earlier, so walk back a page and unmap from there.
            unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
        }

        // Forget this thread's guard-page info so the signal handler stops
        // reporting overflows against a stack that may be gone.
        delete_current_info();
    }
290
291 /// Modern kernels on modern hardware can have dynamic signal stack sizes.
292 #[cfg(any(target_os = "linux", target_os = "android"))]
293 fn sigstack_size() -> usize {
294 let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
295 // If getauxval couldn't find the entry, it returns 0,
296 // so take the higher of the "constant" and auxval.
297 // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
298 libc::SIGSTKSZ.max(dynamic_sigstksz as _)
299 }
300
    /// Not all OS support hardware where this is needed.
    /// On these targets the static constant is always sufficient.
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    fn sigstack_size() -> usize {
        libc::SIGSTKSZ
    }
306
    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        // stack_getbounds fills in the current thread's stack segment;
        // callers treat `ss_sp` as the start (low end) of the stack.
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
        Some(current_stack.ss_sp)
    }
313
314 #[cfg(target_os = "macos")]
315 unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
316 let th = libc::pthread_self();
317 let stackptr = libc::pthread_get_stackaddr_np(th);
318 Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
319 }
320
    #[cfg(target_os = "openbsd")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        // pthread_stackseg_np reports the stack segment; ss_sp is its top,
        // so walk down by ss_size to reach the start.
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);

        let stack_ptr = current_stack.ss_sp;
        let stackaddr = if libc::pthread_main_np() == 1 {
            // main thread: skip one page (the PAGE_SIZE adjustment below).
            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
        } else {
            // new thread
            stack_ptr.addr() - current_stack.ss_size
        };
        Some(stack_ptr.with_addr(stackaddr))
    }
336
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "l4re"
    ))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut ret = None;
        // FreeBSD initializes `attr` with pthread_attr_init below; all other
        // targets zero it so pthread_attr_destroy is safe even on failure.
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        // FreeBSD spells the "get attributes of a live thread" call
        // differently from the glibc-style pthread_getattr_np.
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut stackaddr = crate::ptr::null_mut();
            let mut stacksize = 0;
            assert_eq!(
                libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr, &mut stacksize),
                0
            );
            ret = Some(stackaddr);
        }
        // On FreeBSD the attr was pthread_attr_init-ed unconditionally, so
        // it must be destroyed even when fetching the attributes failed.
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
371
372 fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
373 let stackptr = unsafe { get_stack_start()? };
374 let stackaddr = stackptr.addr();
375
376 // Ensure stackaddr is page aligned! A parent process might
377 // have reset RLIMIT_STACK to be non-page aligned. The
378 // pthread_attr_getstack() reports the usable stack area
379 // stackaddr < stackaddr + stacksize, so if stackaddr is not
380 // page-aligned, calculate the fix such that stackaddr <
381 // new_page_aligned_stackaddr < stackaddr + stacksize
382 let remainder = stackaddr % page_size;
383 Some(if remainder == 0 {
384 stackptr
385 } else {
386 stackptr.with_addr(stackaddr + page_size - remainder)
387 })
388 }
389
    /// Installs (or merely locates) the main thread's stack guard region,
    /// returning the address range that should be reported as an overflow.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard() -> Option<Range<usize>> {
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        unsafe {
            // Dispatch with `cfg!` instead of `#[cfg]` —
            // this way someone on any unix-y OS can check that all these compile
            if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
                install_main_guard_linux(page_size)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                install_main_guard_linux_musl(page_size)
            } else if cfg!(target_os = "freebsd") {
                install_main_guard_freebsd(page_size)
            } else if cfg!(any(target_os = "netbsd", target_os = "openbsd")) {
                install_main_guard_bsds(page_size)
            } else {
                install_main_guard_default(page_size)
            }
        }
    }
409
410 #[forbid(unsafe_op_in_unsafe_fn)]
411 unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
412 // Linux doesn't allocate the whole stack right away, and
413 // the kernel has its own stack-guard mechanism to fault
414 // when growing too close to an existing mapping. If we map
415 // our own guard, then the kernel starts enforcing a rather
416 // large gap above that, rendering much of the possible
417 // stack space useless. See #43052.
418 //
419 // Instead, we'll just note where we expect rlimit to start
420 // faulting, so our handler can report "stack overflow", and
421 // trust that the kernel's own stack guard will work.
422 let stackptr = stack_start_aligned(page_size)?;
423 let stackaddr = stackptr.addr();
424 Some(stackaddr - page_size..stackaddr)
425 }
426
427 #[forbid(unsafe_op_in_unsafe_fn)]
428 unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
429 // For the main thread, the musl's pthread_attr_getstack
430 // returns the current stack size, rather than maximum size
431 // it can eventually grow to. It cannot be used to determine
432 // the position of kernel's stack guard.
433 None
434 }
435
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
        // FreeBSD's stack autogrows, and optionally includes a guard page
        // at the bottom. If we try to remap the bottom of the stack
        // ourselves, FreeBSD's guard page moves upwards. So we'll just use
        // the builtin guard page.
        let stackptr = stack_start_aligned(page_size)?;
        let guardaddr = stackptr.addr();
        // Technically the number of guard pages is tunable and controlled
        // by the security.bsd.stack_guard_page sysctl.
        // By default it is 1, checking once is enough since it is
        // a boot time config value.
        static PAGES: OnceLock<usize> = OnceLock::new();

        let pages = PAGES.get_or_init(|| {
            use crate::sys::weak::dlsym;
            // Resolve sysctlbyname lazily so this also works when the
            // symbol is unavailable at runtime.
            dlsym!(
                fn sysctlbyname(
                    name: *const libc::c_char,
                    oldp: *mut libc::c_void,
                    oldlenp: *mut libc::size_t,
                    newp: *const libc::c_void,
                    newlen: libc::size_t,
                ) -> libc::c_int;
            );
            let mut guard: usize = 0;
            let mut size = size_of_val(&guard);
            let oid = c"security.bsd.stack_guard_page";
            match sysctlbyname.get() {
                Some(fcn)
                    if unsafe {
                        fcn(
                            oid.as_ptr(),
                            (&raw mut guard).cast(),
                            &raw mut size,
                            ptr::null_mut(),
                            0,
                        ) == 0
                    } =>
                {
                    guard
                }
                // Symbol missing or the sysctl read failed: fall back to
                // the default of one guard page.
                _ => 1,
            }
        });
        Some(guardaddr..guardaddr + pages * page_size)
    }
483
484 #[forbid(unsafe_op_in_unsafe_fn)]
485 unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
486 // OpenBSD stack already includes a guard page, and stack is
487 // immutable.
488 // NetBSD stack includes the guard page.
489 //
490 // We'll just note where we expect rlimit to start
491 // faulting, so our handler can report "stack overflow", and
492 // trust that the kernel's own stack guard will work.
493 let stackptr = stack_start_aligned(page_size)?;
494 let stackaddr = stackptr.addr();
495 Some(stackaddr - page_size..stackaddr)
496 }
497
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
        // Reallocate the last page of the stack.
        // This ensures SIGBUS will be raised on
        // stack overflow.
        // Systems which enforce strict PAX MPROTECT do not allow
        // to mprotect() a mapping with less restrictive permissions
        // than the initial mmap() used, so we mmap() here with
        // read/write permissions and only then mprotect() it to
        // no permissions at all. See issue #50313.
        let stackptr = stack_start_aligned(page_size)?;
        let result = unsafe {
            // MAP_FIXED: deliberately replace the page at `stackptr`.
            mmap64(
                stackptr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                -1,
                0,
            )
        };
        // With MAP_FIXED a success must land exactly at `stackptr`; any
        // other value (including MAP_FAILED) is a failure.
        if result != stackptr || result == MAP_FAILED {
            panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
        }

        let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
        if result != 0 {
            panic!("failed to protect the guard page: {}", io::Error::last_os_error());
        }

        let guardaddr = stackptr.addr();

        Some(guardaddr..guardaddr + page_size)
    }
532
533 #[cfg(any(
534 target_os = "macos",
535 target_os = "openbsd",
536 target_os = "solaris",
537 target_os = "illumos",
538 ))]
539 // FIXME: I am probably not unsafe.
540 unsafe fn current_guard() -> Option<Range<usize>> {
541 let stackptr = get_stack_start()?;
542 let stackaddr = stackptr.addr();
543 Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
544 }
545
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let mut ret = None;

        // Same attr init/teardown protocol as `get_stack_start` above:
        // FreeBSD uses pthread_attr_init/pthread_attr_get_np, everyone
        // else zero-initializes and uses pthread_getattr_np.
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut guardsize = 0;
            assert_eq!(libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), 0);
            if guardsize == 0 {
                if cfg!(all(target_os = "linux", target_env = "musl")) {
                    // musl versions before 1.1.19 always reported guard
                    // size obtained from pthread_attr_get_np as zero.
                    // Use page size as a fallback.
                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
                } else {
                    panic!("there is no guard page");
                }
            }
            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
            let mut size = 0;
            assert_eq!(libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size), 0);

            // Where the guard sits relative to the reported stack base
            // differs per platform/libc, hence the cases below.
            let stackaddr = stackptr.addr();
            ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
            {
                // glibc used to include the guard area within the stack, as noted in the BUGS
                // section of `man pthread_attr_getguardsize`. This has been corrected starting
                // with glibc 2.27, and in some distro backports, so the guard is now placed at the
                // end (below) the stack. There's no easy way for us to know which we have at
                // runtime, so we'll just match any fault in the range right above or below the
                // stack base to call that fault a stack overflow.
                Some(stackaddr - guardsize..stackaddr + guardsize)
            } else {
                Some(stackaddr..stackaddr + guardsize)
            };
        }
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
608}
609
610// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
611// several symbols that might lead to rejections from the App Store, namely
612// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
613//
614// This might be overly cautious, though it is also what Swift does (and they
615// usually have fewer qualms about forwards compatibility, since the runtime
616// is shipped with the OS):
617// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
618#[cfg(any(
619 miri,
620 not(any(
621 target_os = "linux",
622 target_os = "freebsd",
623 target_os = "hurd",
624 target_os = "macos",
625 target_os = "netbsd",
626 target_os = "openbsd",
627 target_os = "solaris",
628 target_os = "illumos",
629 target_os = "cygwin",
630 ))
631))]
mod imp {
    /// No-op: this target (or Miri) gets no stack-overflow detection.
    pub unsafe fn init() {}

    /// Nothing to tear down; `init` installed nothing.
    pub unsafe fn cleanup() {}

    /// Always returns the null handler — no per-thread state is needed.
    pub unsafe fn make_handler(_main_thread: bool) -> super::Handler {
        super::Handler::null()
    }

    /// Accepts (and ignores) any pointer from `make_handler` — always null here.
    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}
643
644#[cfg(target_os = "cygwin")]
645mod imp {
    // Minimal hand-written subset of the Win32/NT bindings used by this
    // module (Cygwin delivers stack overflows via SEH, Windows-style).
    mod c {
        pub type PVECTORED_EXCEPTION_HANDLER =
            Option<unsafe extern "system" fn(exceptioninfo: *mut EXCEPTION_POINTERS) -> i32>;
        pub type NTSTATUS = i32;
        pub type BOOL = i32;

        unsafe extern "system" {
            pub fn AddVectoredExceptionHandler(
                first: u32,
                handler: PVECTORED_EXCEPTION_HANDLER,
            ) -> *mut core::ffi::c_void;
            pub fn SetThreadStackGuarantee(stacksizeinbytes: *mut u32) -> BOOL;
        }

        pub const EXCEPTION_STACK_OVERFLOW: NTSTATUS = 0xC00000FD_u32 as _;
        pub const EXCEPTION_CONTINUE_SEARCH: i32 = 1i32;

        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_POINTERS {
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            // We don't need this field here
            // pub Context: *mut CONTEXT,
        }
        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_RECORD {
            pub ExceptionCode: NTSTATUS,
            pub ExceptionFlags: u32,
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ExceptionAddress: *mut core::ffi::c_void,
            pub NumberParameters: u32,
            pub ExceptionInformation: [usize; 15],
        }
    }
681
    /// Reserve stack space for use in stack overflow exceptions.
    fn reserve_stack() {
        // 0x5000 = 20 KiB guarantee for running the exception handler.
        let result = unsafe { c::SetThreadStackGuarantee(&mut 0x5000) };
        // Reserving stack space is not critical so we allow it to fail in the released build of libstd.
        // We still use debug assert here so that CI will test that we haven't made a mistake calling the function.
        debug_assert_ne!(result, 0, "failed to reserve stack space for exception handling");
    }
689
    /// Vectored exception handler: prints a diagnostic on stack overflow,
    /// then always passes the exception on to the next handler.
    unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> i32 {
        // SAFETY: It's up to the caller (which in this case is the OS) to ensure that `ExceptionInfo` is valid.
        unsafe {
            let rec = &(*(*ExceptionInfo).ExceptionRecord);
            let code = rec.ExceptionCode;

            if code == c::EXCEPTION_STACK_OVERFLOW {
                crate::thread::with_current_name(|name| {
                    let name = name.unwrap_or("<unknown>");
                    rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
                });
            }
            // We only print the message; let the default handling proceed.
            c::EXCEPTION_CONTINUE_SEARCH
        }
    }
705
    /// Installs the vectored exception handler and reserves stack space for
    /// the main thread's overflow handling.
    pub unsafe fn init() {
        // SAFETY: `vectored_handler` has the correct ABI and is safe to call during exception handling.
        unsafe {
            // `0`: append to the handler chain rather than prepending.
            let result = c::AddVectoredExceptionHandler(0, Some(vectored_handler));
            // Similar to the above, adding the stack overflow handler is allowed to fail
            // but a debug assert is used so CI will still test that it normally works.
            debug_assert!(!result.is_null(), "failed to install exception handler");
        }
        // Set the thread stack guarantee for the main thread.
        reserve_stack();
    }
717
    /// Nothing to undo on Cygwin; the vectored handler stays installed.
    pub unsafe fn cleanup() {}
719
    /// Reserves stack space for this thread's overflow handling. The main
    /// thread's guarantee was already set by `init`, so it is skipped here.
    pub unsafe fn make_handler(main_thread: bool) -> super::Handler {
        if !main_thread {
            reserve_stack();
        }
        // No per-thread allocation to track on this platform.
        super::Handler::null()
    }
726
    /// No-op: `make_handler` never allocates on this platform.
    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
728}
729