std/sys/pal/unix/
stack_overflow.rs

1#![cfg_attr(test, allow(dead_code))]
2
3pub use self::imp::{cleanup, init};
4use self::imp::{drop_handler, make_handler};
5
/// Owner of a per-thread alternate signal stack set up by `make_handler`.
///
/// Dropping a `Handler` hands `data` back to `drop_handler`.
pub struct Handler {
    // Null when no alternate stack is owned; otherwise the `ss_sp` of the
    // stack mapping created by the platform `imp`.
    data: *mut libc::c_void,
}
9
impl Handler {
    /// Sets up stack-overflow handling for the current (non-main) thread,
    /// installing an alternate signal stack if the platform `imp` needs one.
    ///
    /// # Safety
    /// Mutates the thread's alternate signal stack; see `make_handler`.
    pub unsafe fn new(thread_name: Option<Box<str>>) -> Handler {
        make_handler(false, thread_name)
    }

    /// A handler that owns no alternate stack (`data` is null).
    fn null() -> Handler {
        Handler { data: crate::ptr::null_mut() }
    }
}
19
impl Drop for Handler {
    fn drop(&mut self) {
        // SAFETY: `data` is either null or a pointer previously produced by
        // `make_handler`, which is exactly what `drop_handler` accepts.
        unsafe {
            drop_handler(self.data);
        }
    }
}
27
// Per-thread bookkeeping (guard-page range + thread name) consulted by the
// signal handler. Compiled only on the targets where the real `imp` below is
// compiled, and never under miri (which doesn't model signals).
#[cfg(all(
    not(miri),
    any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
    ),
))]
mod thread_info;
42
43// miri doesn't model signals nor stack overflows and this code has some
44// synchronization properties that we don't want to expose to user code,
45// hence we disable it on miri.
46#[cfg(all(
47    not(miri),
48    any(
49        target_os = "linux",
50        target_os = "freebsd",
51        target_os = "hurd",
52        target_os = "macos",
53        target_os = "netbsd",
54        target_os = "openbsd",
55        target_os = "solaris",
56        target_os = "illumos",
57    )
58))]
59mod imp {
60    use libc::{
61        MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
62        SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
63    };
64    #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
65    use libc::{mmap as mmap64, mprotect, munmap};
66    #[cfg(all(target_os = "linux", target_env = "gnu"))]
67    use libc::{mmap64, mprotect, munmap};
68
69    use super::Handler;
70    use super::thread_info::{delete_current_info, set_current_info, with_current_info};
71    use crate::ops::Range;
72    use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
73    use crate::sys::pal::unix::os;
74    use crate::{io, mem, ptr};
75
76    // Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
77    // (unmapped pages) at the end of every thread's stack, so if a thread ends
78    // up running into the guard page it'll trigger this handler. We want to
79    // detect these cases and print out a helpful error saying that the stack
80    // has overflowed. All other signals, however, should go back to what they
81    // were originally supposed to do.
82    //
83    // This handler currently exists purely to print an informative message
84    // whenever a thread overflows its stack. We then abort to exit and
85    // indicate a crash, but to avoid a misleading SIGSEGV that might lead
86    // users to believe that unsafe code has accessed an invalid pointer; the
87    // SIGSEGV encountered when overflowing the stack is expected and
88    // well-defined.
89    //
90    // If this is not a stack overflow, the handler un-registers itself and
91    // then returns (to allow the original signal to be delivered again).
92    // Returning from this kind of signal handler is technically not defined
93    // to work when reading the POSIX spec strictly, but in practice it turns
94    // out many large systems and all implementations allow returning from a
95    // signal handler to work. For a more detailed explanation see the
96    // comments on #26458.
    /// SIGSEGV/SIGBUS entry point
    /// # Safety
    /// Rust doesn't call this, it *gets called*.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe extern "C" fn signal_handler(
        signum: libc::c_int,
        info: *mut libc::siginfo_t,
        _data: *mut libc::c_void,
    ) {
        // SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
        let fault_addr = unsafe { (*info).si_addr().addr() };

        // `with_current_info` expects that the process aborts after it is
        // called. If the signal was not caused by a memory access, this might
        // not be true. We detect this by noticing that the `si_addr` field is
        // zero if the signal is synthetic.
        if fault_addr != 0 {
            with_current_info(|thread_info| {
                // If the faulting address is within the guard page, then we print a
                // message saying so and abort.
                if let Some(thread_info) = thread_info
                    && thread_info.guard_page_range.contains(&fault_addr)
                {
                    let name = thread_info.thread_name.as_deref().unwrap_or("<unknown>");
                    let tid = crate::thread::current_os_id();
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                    // Does not return; the process aborts here on a genuine
                    // stack overflow.
                    rtabort!("stack overflow");
                }
            })
        }

        // Not a stack overflow we recognize: fall through so the original
        // signal can be re-delivered with its default disposition.
        // Unregister ourselves by reverting back to the default behavior.
        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        action.sa_sigaction = SIG_DFL;
        // SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
        unsafe { sigaction(signum, &action, ptr::null_mut()) };

        // See comment above for why this function returns.
    }
137
    // Cached page size; written once in `init`, read by the guard/altstack code.
    static PAGE_SIZE: Atomic<usize> = AtomicUsize::new(0);
    // Alternate-stack pointer installed for the main thread in `init`;
    // released again in `cleanup` via `drop_handler`.
    static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut());
    // Set in `init` once our signal handler is installed, telling per-thread
    // `make_handler` calls that an alternate signal stack is needed.
    static NEED_ALTSTACK: Atomic<bool> = AtomicBool::new(false);
141
    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn init() {
        PAGE_SIZE.store(os::page_size(), Ordering::Relaxed);

        // SAFETY: `init` is called exactly once, per this function's contract.
        let mut guard_page_range = unsafe { install_main_guard() };

        // Even for panic=immediate-abort, installing the guard pages is important for soundness.
        // That said, we do not care about giving nice stackoverflow messages via our custom
        // signal handler, just exit early and let the user enjoy the segfault.
        if cfg!(panic = "immediate-abort") {
            return;
        }

        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        for &signal in &[SIGSEGV, SIGBUS] {
            // SAFETY: just fetches the current signal handler into action
            unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
            // Configure our signal handler if one is not already set.
            if action.sa_sigaction == SIG_DFL {
                if !NEED_ALTSTACK.load(Ordering::Relaxed) {
                    // haven't set up our sigaltstack yet
                    // NOTE(review): this Release store presumably pairs with the
                    // Acquire load in `make_handler` — confirm against the
                    // synchronization notes in #26458 / thread_info.
                    NEED_ALTSTACK.store(true, Ordering::Release);
                    let handler = unsafe { make_handler(true, None) };
                    MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
                    // Keep the main thread's altstack alive for the whole
                    // process; `cleanup` releases it via MAIN_ALTSTACK.
                    mem::forget(handler);

                    if let Some(guard_page_range) = guard_page_range.take() {
                        set_current_info(guard_page_range, Some(Box::from("main")));
                    }
                }

                action.sa_flags = SA_SIGINFO | SA_ONSTACK;
                action.sa_sigaction = signal_handler
                    as unsafe extern "C" fn(i32, *mut libc::siginfo_t, *mut libc::c_void)
                    as sighandler_t;
                // SAFETY: only overriding signals if the default is set
                unsafe { sigaction(signal, &action, ptr::null_mut()) };
            }
        }
    }
185
    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn cleanup() {
        if cfg!(panic = "immediate-abort") {
            return;
        }
        // FIXME: I probably cause more bugs than I'm worth!
        // see https://github.com/rust-lang/rust/issues/111272
        // SAFETY: MAIN_ALTSTACK holds either null or the pointer stored by
        // `init`, both of which `drop_handler` accepts.
        unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
    }
197
    /// Maps a fresh alternate signal stack: one leading `PROT_NONE` guard page
    /// followed by `sigstack_size()` usable bytes. The returned `stack_t`'s
    /// `ss_sp` points just past the guard page; `drop_handler` relies on this
    /// layout when unmapping.
    ///
    /// Panics if the mapping or the guard-page protection fails.
    unsafe fn get_stack() -> libc::stack_t {
        // OpenBSD requires this flag for stack mapping
        // otherwise the said mapping will fail as a no-op on most systems
        // and has a different meaning on FreeBSD
        #[cfg(any(
            target_os = "openbsd",
            target_os = "netbsd",
            target_os = "linux",
            target_os = "dragonfly",
        ))]
        let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
        #[cfg(not(any(
            target_os = "openbsd",
            target_os = "netbsd",
            target_os = "linux",
            target_os = "dragonfly",
        )))]
        let flags = MAP_PRIVATE | MAP_ANON;

        let sigstack_size = sigstack_size();
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        // One extra page for the guard page below the usable stack.
        let stackp = mmap64(
            ptr::null_mut(),
            sigstack_size + page_size,
            PROT_READ | PROT_WRITE,
            flags,
            -1,
            0,
        );
        if stackp == MAP_FAILED {
            panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
        }
        // Revoke access to the lowest page so overflowing the altstack faults.
        let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
        if guard_result != 0 {
            panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
        }
        let stackp = stackp.add(page_size);

        libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
    }
239
    /// # Safety
    /// Mutates the alternate signal stack
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn make_handler(main_thread: bool, thread_name: Option<Box<str>>) -> Handler {
        // Nothing to do unless `init` installed our signal handler; the
        // Acquire load observes the Release store of NEED_ALTSTACK in `init`.
        if cfg!(panic = "immediate-abort") || !NEED_ALTSTACK.load(Ordering::Acquire) {
            return Handler::null();
        }

        // The main thread's guard-page info is recorded by `init` itself.
        if !main_thread {
            if let Some(guard_page_range) = unsafe { current_guard() } {
                set_current_info(guard_page_range, thread_name);
            }
        }

        // SAFETY: assuming stack_t is zero-initializable
        let mut stack = unsafe { mem::zeroed() };
        // SAFETY: reads current stack_t into stack
        unsafe { sigaltstack(ptr::null(), &mut stack) };
        // Configure alternate signal stack, if one is not already set.
        if stack.ss_flags & SS_DISABLE != 0 {
            // SAFETY: We warned our caller this would happen!
            unsafe {
                stack = get_stack();
                sigaltstack(&stack, ptr::null_mut());
            }
            Handler { data: stack.ss_sp as *mut libc::c_void }
        } else {
            // Someone else installed an altstack; we own nothing.
            Handler::null()
        }
    }
270
    /// # Safety
    /// Must be called
    /// - only with our handler or nullptr
    /// - only when done with our altstack
    /// This disables the alternate signal stack!
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn drop_handler(data: *mut libc::c_void) {
        if !data.is_null() {
            let sigstack_size = sigstack_size();
            let page_size = PAGE_SIZE.load(Ordering::Relaxed);
            let disabling_stack = libc::stack_t {
                ss_sp: ptr::null_mut(),
                ss_flags: SS_DISABLE,
                // Workaround for bug in macOS implementation of sigaltstack
                // UNIX2003 which returns ENOMEM when disabling a stack while
                // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
                // both ss_sp and ss_size should be ignored in this case.
                ss_size: sigstack_size,
            };
            // SAFETY: we warned the caller this disables the alternate signal stack!
            unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
            // SAFETY: We know from `get_stack` that the alternate stack we installed is part of
            // a mapping that started one page earlier, so walk back a page and unmap from there.
            unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
        }

        // Always drop this thread's guard-page bookkeeping, even for a null
        // (non-owning) handler.
        delete_current_info();
    }
299
    /// Modern kernels on modern hardware can have dynamic signal stack sizes.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    fn sigstack_size() -> usize {
        // SAFETY: `getauxval` only reads the process's auxiliary vector.
        let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
        // If getauxval couldn't find the entry, it returns 0,
        // so take the higher of the "constant" and auxval.
        // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
        libc::SIGSTKSZ.max(dynamic_sigstksz as _)
    }
309
    /// Not all OS support hardware where this is needed.
    /// Fall back to the POSIX constant minimum signal stack size.
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    fn sigstack_size() -> usize {
        libc::SIGSTKSZ
    }
315
    // Lowest address of the current thread's stack, via stack_getbounds.
    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
        Some(current_stack.ss_sp)
    }
322
    // Lowest address of the current thread's stack. macOS reports the stack
    // *top*, so subtract the stack size to get the bottom.
    #[cfg(target_os = "macos")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let th = libc::pthread_self();
        let stackptr = libc::pthread_get_stackaddr_np(th);
        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
    }
329
    // Lowest address of the current thread's stack via pthread_stackseg_np;
    // `ss_sp` here is the stack top, so subtract the size.
    #[cfg(target_os = "openbsd")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);

        let stack_ptr = current_stack.ss_sp;
        let stackaddr = if libc::pthread_main_np() == 1 {
            // main thread
            // NOTE(review): the extra page presumably skips the kernel-provided
            // guard page on the main stack — confirm against pthread_main_np docs.
            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
        } else {
            // new thread
            stack_ptr.addr() - current_stack.ss_size
        };
        Some(stack_ptr.with_addr(stackaddr))
    }
345
    // Lowest address of the current thread's stack, queried through the
    // pthread attribute APIs. Returns None when the attributes can't be read.
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "l4re"
    ))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut ret = None;
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        // On non-FreeBSD, pthread_getattr_np initializes the attr itself, but
        // we zero it first to be safe.
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        // FreeBSD requires an explicit init before pthread_attr_get_np.
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut stackaddr = crate::ptr::null_mut();
            let mut stacksize = 0;
            assert_eq!(
                libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr, &mut stacksize),
                0
            );
            ret = Some(stackaddr);
        }
        // Destroy the attr whenever it was successfully initialized: always on
        // FreeBSD (explicit init above), otherwise only when the getattr call
        // succeeded.
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
380
381    fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
382        let stackptr = unsafe { get_stack_start()? };
383        let stackaddr = stackptr.addr();
384
385        // Ensure stackaddr is page aligned! A parent process might
386        // have reset RLIMIT_STACK to be non-page aligned. The
387        // pthread_attr_getstack() reports the usable stack area
388        // stackaddr < stackaddr + stacksize, so if stackaddr is not
389        // page-aligned, calculate the fix such that stackaddr <
390        // new_page_aligned_stackaddr < stackaddr + stacksize
391        let remainder = stackaddr % page_size;
392        Some(if remainder == 0 {
393            stackptr
394        } else {
395            stackptr.with_addr(stackaddr + page_size - remainder)
396        })
397    }
398
    // Installs (or locates) the main thread's stack guard page and returns the
    // address range a fault in which should be reported as a stack overflow.
    //
    // Dispatches on `cfg!` rather than `#[cfg]` so that every OS-specific
    // helper is at least type-checked on all unix-y targets (except the
    // FreeBSD body, which uses FreeBSD-only items and needs a real `#[cfg]`).
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard() -> Option<Range<usize>> {
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        unsafe {
            // this way someone on any unix-y OS can check that all these compile
            if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
                install_main_guard_linux(page_size)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                install_main_guard_linux_musl(page_size)
            } else if cfg!(target_os = "freebsd") {
                #[cfg(not(target_os = "freebsd"))]
                return None;
                // The FreeBSD code cannot be checked on non-BSDs.
                #[cfg(target_os = "freebsd")]
                install_main_guard_freebsd(page_size)
            } else if cfg!(any(target_os = "netbsd", target_os = "openbsd")) {
                install_main_guard_bsds(page_size)
            } else {
                install_main_guard_default(page_size)
            }
        }
    }
422
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
        // Linux doesn't allocate the whole stack right away, and
        // the kernel has its own stack-guard mechanism to fault
        // when growing too close to an existing mapping. If we map
        // our own guard, then the kernel starts enforcing a rather
        // large gap above that, rendering much of the possible
        // stack space useless. See #43052.
        //
        // Instead, we'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        // The page directly below the stack start is where we expect faults.
        Some(stackaddr - page_size..stackaddr)
    }
439
    // No guard range can be determined on musl; overflow detection for the
    // main thread is effectively disabled there.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
        // For the main thread, the musl's pthread_attr_getstack
        // returns the current stack size, rather than maximum size
        // it can eventually grow to. It cannot be used to determine
        // the position of kernel's stack guard.
        None
    }
448
    #[forbid(unsafe_op_in_unsafe_fn)]
    #[cfg(target_os = "freebsd")]
    unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
        // FreeBSD's stack autogrows, and optionally includes a guard page
        // at the bottom. If we try to remap the bottom of the stack
        // ourselves, FreeBSD's guard page moves upwards. So we'll just use
        // the builtin guard page.
        let stackptr = stack_start_aligned(page_size)?;
        let guardaddr = stackptr.addr();
        // Technically the number of guard pages is tunable and controlled
        // by the security.bsd.stack_guard_page sysctl.
        // By default it is 1, checking once is enough since it is
        // a boot time config value.
        static PAGES: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();

        let pages = PAGES.get_or_init(|| {
            let mut guard: usize = 0;
            let mut size = size_of_val(&guard);
            let oid = c"security.bsd.stack_guard_page";

            // SAFETY: the oid is NUL-terminated and `guard`/`size` point to
            // valid, correctly-sized out-parameters.
            let r = unsafe {
                libc::sysctlbyname(
                    oid.as_ptr(),
                    (&raw mut guard).cast(),
                    &raw mut size,
                    ptr::null_mut(),
                    0,
                )
            };
            // Fall back to the default of one guard page if the sysctl fails.
            if r == 0 { guard } else { 1 }
        });
        Some(guardaddr..guardaddr + pages * page_size)
    }
482
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
        // OpenBSD stack already includes a guard page, and stack is
        // immutable.
        // NetBSD stack includes the guard page.
        //
        // We'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        // The page directly below the stack start is where we expect faults.
        Some(stackaddr - page_size..stackaddr)
    }
496
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
        // Reallocate the last page of the stack.
        // This ensures SIGBUS will be raised on
        // stack overflow.
        // Systems which enforce strict PAX MPROTECT do not allow
        // to mprotect() a mapping with less restrictive permissions
        // than the initial mmap() used, so we mmap() here with
        // read/write permissions and only then mprotect() it to
        // no permissions at all. See issue #50313.
        let stackptr = stack_start_aligned(page_size)?;
        // SAFETY: MAP_FIXED over the page at the bottom of the main stack,
        // which this function intentionally repurposes as the guard page.
        let result = unsafe {
            mmap64(
                stackptr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                -1,
                0,
            )
        };
        if result != stackptr || result == MAP_FAILED {
            panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
        }

        // SAFETY: revokes access to the freshly-mapped page only.
        let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
        if result != 0 {
            panic!("failed to protect the guard page: {}", io::Error::last_os_error());
        }

        let guardaddr = stackptr.addr();

        Some(guardaddr..guardaddr + page_size)
    }
531
    // Guard-page range for the current (non-main) thread: assumed to be the
    // single page directly below the reported stack start.
    #[cfg(any(
        target_os = "macos",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let stackptr = get_stack_start()?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
    }
544
    // Guard-page range for the current (non-main) thread, derived from the
    // pthread attributes (guard size + stack base). Where the guard sits
    // relative to the reported stack base differs per platform/libc.
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let mut ret = None;

        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        // FreeBSD requires an explicit init before pthread_attr_get_np.
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut guardsize = 0;
            assert_eq!(libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), 0);
            if guardsize == 0 {
                if cfg!(all(target_os = "linux", target_env = "musl")) {
                    // musl versions before 1.1.19 always reported guard
                    // size obtained from pthread_attr_get_np as zero.
                    // Use page size as a fallback.
                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
                } else {
                    panic!("there is no guard page");
                }
            }
            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
            let mut size = 0;
            assert_eq!(libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size), 0);

            let stackaddr = stackptr.addr();
            ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
                // Guard lies below the reported stack base.
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                // Same layout as the BSDs: guard below the stack base.
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
            {
                // glibc used to include the guard area within the stack, as noted in the BUGS
                // section of `man pthread_attr_getguardsize`. This has been corrected starting
                // with glibc 2.27, and in some distro backports, so the guard is now placed at the
                // end (below) the stack. There's no easy way for us to know which we have at
                // runtime, so we'll just match any fault in the range right above or below the
                // stack base to call that fault a stack overflow.
                Some(stackaddr - guardsize..stackaddr + guardsize)
            } else {
                // Remaining targets: guard counted as part of the stack itself.
                Some(stackaddr..stackaddr + guardsize)
            };
        }
        // Destroy the attr whenever it was initialized (see get_stack_start).
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
607}
608
609// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
610// several symbols that might lead to rejections from the App Store, namely
611// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
612//
613// This might be overly cautious, though it is also what Swift does (and they
614// usually have fewer qualms about forwards compatibility, since the runtime
615// is shipped with the OS):
616// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
#[cfg(any(
    miri,
    not(any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
        target_os = "cygwin",
    ))
))]
mod imp {
    // Stub implementation for targets without stack-overflow detection (and
    // for miri): every entry point is a no-op and every handler is null.
    pub unsafe fn init() {}

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(
        _main_thread: bool,
        _thread_name: Option<Box<str>>,
    ) -> super::Handler {
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}
645
#[cfg(target_os = "cygwin")]
mod imp {
    // Cygwin delivers stack overflows through Windows structured exception
    // handling rather than POSIX signals, so this `imp` uses a vectored
    // exception handler instead of sigaction/sigaltstack.

    // Minimal hand-written bindings for the Windows APIs used below.
    mod c {
        pub type PVECTORED_EXCEPTION_HANDLER =
            Option<unsafe extern "system" fn(exceptioninfo: *mut EXCEPTION_POINTERS) -> i32>;
        pub type NTSTATUS = i32;
        pub type BOOL = i32;

        unsafe extern "system" {
            pub fn AddVectoredExceptionHandler(
                first: u32,
                handler: PVECTORED_EXCEPTION_HANDLER,
            ) -> *mut core::ffi::c_void;
            pub fn SetThreadStackGuarantee(stacksizeinbytes: *mut u32) -> BOOL;
        }

        pub const EXCEPTION_STACK_OVERFLOW: NTSTATUS = 0xC00000FD_u32 as _;
        pub const EXCEPTION_CONTINUE_SEARCH: i32 = 1i32;

        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_POINTERS {
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            // We don't need this field here
            // pub Context: *mut CONTEXT,
        }
        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_RECORD {
            pub ExceptionCode: NTSTATUS,
            pub ExceptionFlags: u32,
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ExceptionAddress: *mut core::ffi::c_void,
            pub NumberParameters: u32,
            pub ExceptionInformation: [usize; 15],
        }
    }

    /// Reserve stack space for use in stack overflow exceptions.
    fn reserve_stack() {
        let result = unsafe { c::SetThreadStackGuarantee(&mut 0x5000) };
        // Reserving stack space is not critical so we allow it to fail in the released build of libstd.
        // We still use debug assert here so that CI will test that we haven't made a mistake calling the function.
        debug_assert_ne!(result, 0, "failed to reserve stack space for exception handling");
    }

    // Prints the overflow message for stack-overflow exceptions; always
    // returns EXCEPTION_CONTINUE_SEARCH so later handlers still run.
    unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> i32 {
        // SAFETY: It's up to the caller (which in this case is the OS) to ensure that `ExceptionInfo` is valid.
        unsafe {
            let rec = &(*(*ExceptionInfo).ExceptionRecord);
            let code = rec.ExceptionCode;

            if code == c::EXCEPTION_STACK_OVERFLOW {
                crate::thread::with_current_name(|name| {
                    let name = name.unwrap_or("<unknown>");
                    let tid = crate::thread::current_os_id();
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                });
            }
            c::EXCEPTION_CONTINUE_SEARCH
        }
    }

    pub unsafe fn init() {
        // SAFETY: `vectored_handler` has the correct ABI and is safe to call during exception handling.
        unsafe {
            let result = c::AddVectoredExceptionHandler(0, Some(vectored_handler));
            // Similar to the above, adding the stack overflow handler is allowed to fail
            // but a debug assert is used so CI will still test that it normally works.
            debug_assert!(!result.is_null(), "failed to install exception handler");
        }
        // Set the thread stack guarantee for the main thread.
        reserve_stack();
    }

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(
        main_thread: bool,
        _thread_name: Option<Box<str>>,
    ) -> super::Handler {
        // `init` already reserved stack space for the main thread.
        if !main_thread {
            reserve_stack();
        }
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}