std/sys/pal/unix/
stack_overflow.rs

1#![cfg_attr(test, allow(dead_code))]
2
3pub use self::imp::{cleanup, init};
4use self::imp::{drop_handler, make_handler};
5
pub struct Handler {
    // Pointer to this thread's alternate signal stack allocation, or null
    // when no stack was installed (see `imp::make_handler`). Passed back to
    // `imp::drop_handler` on drop.
    data: *mut libc::c_void,
}
9
impl Handler {
    /// Installs stack overflow handling for the current (non-main) thread.
    ///
    /// # Safety
    /// Mutates the alternate signal stack; see `imp::make_handler`.
    pub unsafe fn new(thread_name: Option<Box<str>>) -> Handler {
        make_handler(false, thread_name)
    }

    /// A handler that owns no alternate-stack allocation (`data` is null).
    fn null() -> Handler {
        Handler { data: crate::ptr::null_mut() }
    }
}
19
impl Drop for Handler {
    fn drop(&mut self) {
        // SAFETY: `data` is either null (`Handler::null`) or the pointer
        // produced by `make_handler`, which is exactly what `drop_handler`
        // documents as acceptable input.
        unsafe {
            drop_handler(self.data);
        }
    }
}
27
28#[cfg(all(
29    not(miri),
30    any(
31        target_os = "linux",
32        target_os = "freebsd",
33        target_os = "hurd",
34        target_os = "macos",
35        target_os = "netbsd",
36        target_os = "openbsd",
37        target_os = "solaris",
38        target_os = "illumos",
39    ),
40))]
41mod thread_info;
42
43// miri doesn't model signals nor stack overflows and this code has some
44// synchronization properties that we don't want to expose to user code,
45// hence we disable it on miri.
46#[cfg(all(
47    not(miri),
48    any(
49        target_os = "linux",
50        target_os = "freebsd",
51        target_os = "hurd",
52        target_os = "macos",
53        target_os = "netbsd",
54        target_os = "openbsd",
55        target_os = "solaris",
56        target_os = "illumos",
57    )
58))]
59mod imp {
60    use libc::{
61        MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
62        SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
63    };
64    #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
65    use libc::{mmap as mmap64, mprotect, munmap};
66    #[cfg(all(target_os = "linux", target_env = "gnu"))]
67    use libc::{mmap64, mprotect, munmap};
68
69    use super::Handler;
70    use super::thread_info::{delete_current_info, set_current_info, with_current_info};
71    use crate::ops::Range;
72    use crate::sync::OnceLock;
73    use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
74    use crate::sys::pal::unix::os;
75    use crate::{io, mem, ptr};
76
    // Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
    // (unmapped pages) at the end of every thread's stack, so if a thread ends
    // up running into the guard page it'll trigger this handler. We want to
    // detect these cases and print out a helpful error saying that the stack
    // has overflowed. All other signals, however, should go back to what they
    // were originally supposed to do.
    //
    // This handler currently exists purely to print an informative message
    // whenever a thread overflows its stack. We then abort to exit and
    // indicate a crash, but to avoid a misleading SIGSEGV that might lead
    // users to believe that unsafe code has accessed an invalid pointer; the
    // SIGSEGV encountered when overflowing the stack is expected and
    // well-defined.
    //
    // If this is not a stack overflow, the handler un-registers itself and
    // then returns (to allow the original signal to be delivered again).
    // Returning from this kind of signal handler is technically not defined
    // to work when reading the POSIX spec strictly, but in practice it turns
    // out many large systems and all implementations allow returning from a
    // signal handler to work. For a more detailed explanation see the
    // comments on #26458.
    /// SIGSEGV/SIGBUS entry point
    /// # Safety
    /// Rust doesn't call this, it *gets called*.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe extern "C" fn signal_handler(
        signum: libc::c_int,
        info: *mut libc::siginfo_t,
        _data: *mut libc::c_void,
    ) {
        // SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
        let fault_addr = unsafe { (*info).si_addr().addr() };

        // `with_current_info` expects that the process aborts after it is
        // called. If the signal was not caused by a memory access, this might
        // not be true. We detect this by noticing that the `si_addr` field is
        // zero if the signal is synthetic.
        if fault_addr != 0 {
            with_current_info(|thread_info| {
                // If the faulting address is within the guard page, then we print a
                // message saying so and abort.
                if let Some(thread_info) = thread_info
                    && thread_info.guard_page_range.contains(&fault_addr)
                {
                    let name = thread_info.thread_name.as_deref().unwrap_or("<unknown>");
                    let tid = crate::thread::current_os_id();
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                    rtabort!("stack overflow");
                }
            })
        }

        // Not a stack overflow (or no guard-page info for this thread):
        // unregister ourselves by reverting back to the default behavior.
        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        action.sa_sigaction = SIG_DFL;
        // SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
        unsafe { sigaction(signum, &action, ptr::null_mut()) };

        // See comment above for why this function returns.
    }
138
    // Cached result of `os::page_size()`, stored once in `init`.
    static PAGE_SIZE: Atomic<usize> = AtomicUsize::new(0);
    // The main thread's alternate signal stack pointer, kept here (after the
    // `Handler` is forgotten in `init`) so that `cleanup` can release it.
    static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut());
    // Set by `init` once our signal handler is (about to be) installed;
    // `make_handler` is a no-op until this is true.
    static NEED_ALTSTACK: Atomic<bool> = AtomicBool::new(false);
142
    /// Installs the main thread's guard page and, unless another handler is
    /// already registered, our SIGSEGV/SIGBUS handler plus its altstack.
    ///
    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn init() {
        PAGE_SIZE.store(os::page_size(), Ordering::Relaxed);

        // SAFETY: `init` is called only once, per this function's contract.
        let mut guard_page_range = unsafe { install_main_guard() };

        // Even for panic=immediate-abort, installing the guard pages is important for soundness.
        // That said, we do not care about giving nice stackoverflow messages via our custom
        // signal handler, just exit early and let the user enjoy the segfault.
        if cfg!(panic = "immediate-abort") {
            return;
        }

        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        for &signal in &[SIGSEGV, SIGBUS] {
            // SAFETY: just fetches the current signal handler into action
            unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
            // Configure our signal handler if one is not already set.
            if action.sa_sigaction == SIG_DFL {
                if !NEED_ALTSTACK.load(Ordering::Relaxed) {
                    // haven't set up our sigaltstack yet
                    NEED_ALTSTACK.store(true, Ordering::Release);
                    // SAFETY: mutates this thread's alternate signal stack,
                    // which is exactly what `make_handler` documents.
                    let handler = unsafe { make_handler(true, None) };
                    MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
                    // Keep the main thread's altstack alive for the program's
                    // lifetime; `cleanup` releases it via MAIN_ALTSTACK.
                    mem::forget(handler);

                    if let Some(guard_page_range) = guard_page_range.take() {
                        set_current_info(guard_page_range, Some(Box::from("main")));
                    }
                }

                action.sa_flags = SA_SIGINFO | SA_ONSTACK;
                action.sa_sigaction = signal_handler as sighandler_t;
                // SAFETY: only overriding signals if the default is set
                unsafe { sigaction(signal, &action, ptr::null_mut()) };
            }
        }
    }
184
    /// Releases the main thread's alternate signal stack installed by `init`.
    ///
    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn cleanup() {
        if cfg!(panic = "immediate-abort") {
            return;
        }
        // FIXME: I probably cause more bugs than I'm worth!
        // see https://github.com/rust-lang/rust/issues/111272
        // SAFETY: MAIN_ALTSTACK is either null or the handler pointer stored
        // by `init`, which is what `drop_handler` accepts.
        unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
    }
196
197    unsafe fn get_stack() -> libc::stack_t {
198        // OpenBSD requires this flag for stack mapping
199        // otherwise the said mapping will fail as a no-op on most systems
200        // and has a different meaning on FreeBSD
201        #[cfg(any(
202            target_os = "openbsd",
203            target_os = "netbsd",
204            target_os = "linux",
205            target_os = "dragonfly",
206        ))]
207        let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
208        #[cfg(not(any(
209            target_os = "openbsd",
210            target_os = "netbsd",
211            target_os = "linux",
212            target_os = "dragonfly",
213        )))]
214        let flags = MAP_PRIVATE | MAP_ANON;
215
216        let sigstack_size = sigstack_size();
217        let page_size = PAGE_SIZE.load(Ordering::Relaxed);
218
219        let stackp = mmap64(
220            ptr::null_mut(),
221            sigstack_size + page_size,
222            PROT_READ | PROT_WRITE,
223            flags,
224            -1,
225            0,
226        );
227        if stackp == MAP_FAILED {
228            panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
229        }
230        let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
231        if guard_result != 0 {
232            panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
233        }
234        let stackp = stackp.add(page_size);
235
236        libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
237    }
238
    /// Installs an alternate signal stack for the current thread if none is
    /// set, and (for non-main threads) records the thread's guard-page range
    /// for the signal handler. Returns `Handler::null()` when nothing was
    /// installed.
    ///
    /// # Safety
    /// Mutates the alternate signal stack
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn make_handler(main_thread: bool, thread_name: Option<Box<str>>) -> Handler {
        // Nothing to do unless `init` decided our signal handler is in use.
        if cfg!(panic = "immediate-abort") || !NEED_ALTSTACK.load(Ordering::Acquire) {
            return Handler::null();
        }

        if !main_thread {
            // The main thread's guard-page info is recorded by `init` instead.
            if let Some(guard_page_range) = unsafe { current_guard() } {
                set_current_info(guard_page_range, thread_name);
            }
        }

        // SAFETY: assuming stack_t is zero-initializable
        let mut stack = unsafe { mem::zeroed() };
        // SAFETY: reads current stack_t into stack
        unsafe { sigaltstack(ptr::null(), &mut stack) };
        // Configure alternate signal stack, if one is not already set.
        if stack.ss_flags & SS_DISABLE != 0 {
            // SAFETY: We warned our caller this would happen!
            unsafe {
                stack = get_stack();
                sigaltstack(&stack, ptr::null_mut());
            }
            Handler { data: stack.ss_sp as *mut libc::c_void }
        } else {
            Handler::null()
        }
    }
269
    /// # Safety
    /// Must be called
    /// - only with our handler or nullptr
    /// - only when done with our altstack
    /// This disables the alternate signal stack!
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn drop_handler(data: *mut libc::c_void) {
        if !data.is_null() {
            let sigstack_size = sigstack_size();
            let page_size = PAGE_SIZE.load(Ordering::Relaxed);
            let disabling_stack = libc::stack_t {
                ss_sp: ptr::null_mut(),
                ss_flags: SS_DISABLE,
                // Workaround for bug in macOS implementation of sigaltstack
                // UNIX2003 which returns ENOMEM when disabling a stack while
                // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
                // both ss_sp and ss_size should be ignored in this case.
                ss_size: sigstack_size,
            };
            // SAFETY: we warned the caller this disables the alternate signal stack!
            unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
            // SAFETY: We know from `get_stack` that the alternate stack we installed is part of
            // a mapping that started one page earlier, so walk back a page and unmap from there.
            unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
        }

        // Remove this thread's guard-page/name record so the signal handler
        // no longer reports for it.
        delete_current_info();
    }
298
299    /// Modern kernels on modern hardware can have dynamic signal stack sizes.
300    #[cfg(any(target_os = "linux", target_os = "android"))]
301    fn sigstack_size() -> usize {
302        let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
303        // If getauxval couldn't find the entry, it returns 0,
304        // so take the higher of the "constant" and auxval.
305        // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
306        libc::SIGSTKSZ.max(dynamic_sigstksz as _)
307    }
308
    /// Not all OS support hardware where this is needed.
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    fn sigstack_size() -> usize {
        // Use the platform's static default signal-stack size.
        libc::SIGSTKSZ
    }
314
    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        // Query the current thread's stack bounds; callers treat the returned
        // `ss_sp` as the stack's lowest address.
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
        Some(current_stack.ss_sp)
    }
321
322    #[cfg(target_os = "macos")]
323    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
324        let th = libc::pthread_self();
325        let stackptr = libc::pthread_get_stackaddr_np(th);
326        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
327    }
328
    #[cfg(target_os = "openbsd")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        // Query the current thread's stack segment.
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);

        // `ss_sp` here is the high end of the segment; subtract the size to
        // get the lowest address.
        let stack_ptr = current_stack.ss_sp;
        let stackaddr = if libc::pthread_main_np() == 1 {
            // main thread: offset by an extra page (presumably to skip the
            // kernel's guard page at the bottom — NOTE(review): confirm)
            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
        } else {
            // new thread
            stack_ptr.addr() - current_stack.ss_size
        };
        Some(stack_ptr.with_addr(stackaddr))
    }
344
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "l4re"
    ))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        // Reads the stack's lowest address out of the current thread's
        // pthread attributes; returns None if they cannot be fetched.
        let mut ret = None;
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        // FreeBSD initializes `attr` via pthread_attr_init below; everywhere
        // else it is zero-initialized instead.
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut stackaddr = crate::ptr::null_mut();
            let mut stacksize = 0;
            assert_eq!(
                libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr, &mut stacksize),
                0
            );
            ret = Some(stackaddr);
        }
        // On FreeBSD `attr` was pthread_attr_init'ed unconditionally, so it
        // must be destroyed even when fetching the attributes failed.
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
379
380    fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
381        let stackptr = unsafe { get_stack_start()? };
382        let stackaddr = stackptr.addr();
383
384        // Ensure stackaddr is page aligned! A parent process might
385        // have reset RLIMIT_STACK to be non-page aligned. The
386        // pthread_attr_getstack() reports the usable stack area
387        // stackaddr < stackaddr + stacksize, so if stackaddr is not
388        // page-aligned, calculate the fix such that stackaddr <
389        // new_page_aligned_stackaddr < stackaddr + stacksize
390        let remainder = stackaddr % page_size;
391        Some(if remainder == 0 {
392            stackptr
393        } else {
394            stackptr.with_addr(stackaddr + page_size - remainder)
395        })
396    }
397
    /// Sets up (or locates) the main thread's guard page and returns its
    /// address range, when known for this platform.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard() -> Option<Range<usize>> {
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        unsafe {
            // this way someone on any unix-y OS can check that all these compile
            if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
                install_main_guard_linux(page_size)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                install_main_guard_linux_musl(page_size)
            } else if cfg!(target_os = "freebsd") {
                install_main_guard_freebsd(page_size)
            } else if cfg!(any(target_os = "netbsd", target_os = "openbsd")) {
                install_main_guard_bsds(page_size)
            } else {
                install_main_guard_default(page_size)
            }
        }
    }
417
418    #[forbid(unsafe_op_in_unsafe_fn)]
419    unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
420        // Linux doesn't allocate the whole stack right away, and
421        // the kernel has its own stack-guard mechanism to fault
422        // when growing too close to an existing mapping. If we map
423        // our own guard, then the kernel starts enforcing a rather
424        // large gap above that, rendering much of the possible
425        // stack space useless. See #43052.
426        //
427        // Instead, we'll just note where we expect rlimit to start
428        // faulting, so our handler can report "stack overflow", and
429        // trust that the kernel's own stack guard will work.
430        let stackptr = stack_start_aligned(page_size)?;
431        let stackaddr = stackptr.addr();
432        Some(stackaddr - page_size..stackaddr)
433    }
434
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
        // For the main thread, the musl's pthread_attr_getstack
        // returns the current stack size, rather than maximum size
        // it can eventually grow to. It cannot be used to determine
        // the position of kernel's stack guard.
        // Returning None means no guard-page range is recorded for the
        // main thread on musl.
        None
    }
443
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
        // FreeBSD's stack autogrows, and optionally includes a guard page
        // at the bottom. If we try to remap the bottom of the stack
        // ourselves, FreeBSD's guard page moves upwards. So we'll just use
        // the builtin guard page.
        let stackptr = stack_start_aligned(page_size)?;
        let guardaddr = stackptr.addr();
        // Technically the number of guard pages is tunable and controlled
        // by the security.bsd.stack_guard_page sysctl.
        // By default it is 1, checking once is enough since it is
        // a boot time config value.
        static PAGES: OnceLock<usize> = OnceLock::new();

        let pages = PAGES.get_or_init(|| {
            use crate::sys::weak::dlsym;
            // `sysctlbyname` is resolved lazily so this also works on systems
            // where the symbol is unavailable.
            dlsym!(
                fn sysctlbyname(
                    name: *const libc::c_char,
                    oldp: *mut libc::c_void,
                    oldlenp: *mut libc::size_t,
                    newp: *const libc::c_void,
                    newlen: libc::size_t,
                ) -> libc::c_int;
            );
            let mut guard: usize = 0;
            let mut size = size_of_val(&guard);
            let oid = c"security.bsd.stack_guard_page";
            // Fall back to a single guard page if the symbol is missing or
            // the sysctl read fails.
            match sysctlbyname.get() {
                Some(fcn)
                    if unsafe {
                        fcn(
                            oid.as_ptr(),
                            (&raw mut guard).cast(),
                            &raw mut size,
                            ptr::null_mut(),
                            0,
                        ) == 0
                    } =>
                {
                    guard
                }
                _ => 1,
            }
        });
        Some(guardaddr..guardaddr + pages * page_size)
    }
491
492    #[forbid(unsafe_op_in_unsafe_fn)]
493    unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
494        // OpenBSD stack already includes a guard page, and stack is
495        // immutable.
496        // NetBSD stack includes the guard page.
497        //
498        // We'll just note where we expect rlimit to start
499        // faulting, so our handler can report "stack overflow", and
500        // trust that the kernel's own stack guard will work.
501        let stackptr = stack_start_aligned(page_size)?;
502        let stackaddr = stackptr.addr();
503        Some(stackaddr - page_size..stackaddr)
504    }
505
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
        // Reallocate the last page of the stack.
        // This ensures SIGBUS will be raised on
        // stack overflow.
        // Systems which enforce strict PAX MPROTECT do not allow
        // to mprotect() a mapping with less restrictive permissions
        // than the initial mmap() used, so we mmap() here with
        // read/write permissions and only then mprotect() it to
        // no permissions at all. See issue #50313.
        let stackptr = stack_start_aligned(page_size)?;
        // SAFETY: MAP_FIXED deliberately replaces the page at the stack's
        // base with our own mapping.
        let result = unsafe {
            mmap64(
                stackptr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                -1,
                0,
            )
        };
        if result != stackptr || result == MAP_FAILED {
            panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
        }

        // SAFETY: protects the single page that was just mapped above.
        let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
        if result != 0 {
            panic!("failed to protect the guard page: {}", io::Error::last_os_error());
        }

        let guardaddr = stackptr.addr();

        Some(guardaddr..guardaddr + page_size)
    }
540
541    #[cfg(any(
542        target_os = "macos",
543        target_os = "openbsd",
544        target_os = "solaris",
545        target_os = "illumos",
546    ))]
547    // FIXME: I am probably not unsafe.
548    unsafe fn current_guard() -> Option<Range<usize>> {
549        let stackptr = get_stack_start()?;
550        let stackaddr = stackptr.addr();
551        Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
552    }
553
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        // Derives this thread's guard-page range from its pthread attributes
        // (guard size + stack base); returns None if they cannot be fetched.
        let mut ret = None;

        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        // FreeBSD initializes `attr` via pthread_attr_init below; everywhere
        // else it is zero-initialized instead.
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut guardsize = 0;
            assert_eq!(libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), 0);
            if guardsize == 0 {
                if cfg!(all(target_os = "linux", target_env = "musl")) {
                    // musl versions before 1.1.19 always reported guard
                    // size obtained from pthread_attr_get_np as zero.
                    // Use page size as a fallback.
                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
                } else {
                    panic!("there is no guard page");
                }
            }
            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
            let mut size = 0;
            assert_eq!(libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size), 0);

            // The guard's position relative to the reported stack base varies
            // by platform and libc version, hence the cases below.
            let stackaddr = stackptr.addr();
            ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
            {
                // glibc used to include the guard area within the stack, as noted in the BUGS
                // section of `man pthread_attr_getguardsize`. This has been corrected starting
                // with glibc 2.27, and in some distro backports, so the guard is now placed at the
                // end (below) the stack. There's no easy way for us to know which we have at
                // runtime, so we'll just match any fault in the range right above or below the
                // stack base to call that fault a stack overflow.
                Some(stackaddr - guardsize..stackaddr + guardsize)
            } else {
                Some(stackaddr..stackaddr + guardsize)
            };
        }
        // On FreeBSD `attr` was pthread_attr_init'ed unconditionally, so it
        // must be destroyed even when fetching the attributes failed.
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
616}
617
618// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
619// several symbols that might lead to rejections from the App Store, namely
620// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
621//
622// This might be overly cautious, though it is also what Swift does (and they
623// usually have fewer qualms about forwards compatibility, since the runtime
624// is shipped with the OS):
625// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
626#[cfg(any(
627    miri,
628    not(any(
629        target_os = "linux",
630        target_os = "freebsd",
631        target_os = "hurd",
632        target_os = "macos",
633        target_os = "netbsd",
634        target_os = "openbsd",
635        target_os = "solaris",
636        target_os = "illumos",
637        target_os = "cygwin",
638    ))
639))]
mod imp {
    //! No-op implementation for targets where the signal-based stack
    //! overflow machinery above is unsupported or disabled (e.g. Miri).

    pub unsafe fn init() {}

    pub unsafe fn cleanup() {}

    // Always returns a null handler: there is no per-thread state to manage.
    pub unsafe fn make_handler(
        _main_thread: bool,
        _thread_name: Option<Box<str>>,
    ) -> super::Handler {
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}
654
655#[cfg(target_os = "cygwin")]
656mod imp {
    // Minimal hand-rolled bindings for the Windows vectored-exception APIs
    // used by this module.
    mod c {
        pub type PVECTORED_EXCEPTION_HANDLER =
            Option<unsafe extern "system" fn(exceptioninfo: *mut EXCEPTION_POINTERS) -> i32>;
        pub type NTSTATUS = i32;
        pub type BOOL = i32;

        unsafe extern "system" {
            pub fn AddVectoredExceptionHandler(
                first: u32,
                handler: PVECTORED_EXCEPTION_HANDLER,
            ) -> *mut core::ffi::c_void;
            pub fn SetThreadStackGuarantee(stacksizeinbytes: *mut u32) -> BOOL;
        }

        pub const EXCEPTION_STACK_OVERFLOW: NTSTATUS = 0xC00000FD_u32 as _;
        pub const EXCEPTION_CONTINUE_SEARCH: i32 = 1i32;

        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_POINTERS {
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            // We don't need this field here
            // pub Context: *mut CONTEXT,
        }
        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_RECORD {
            pub ExceptionCode: NTSTATUS,
            pub ExceptionFlags: u32,
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ExceptionAddress: *mut core::ffi::c_void,
            pub NumberParameters: u32,
            pub ExceptionInformation: [usize; 15],
        }
    }
692
693    /// Reserve stack space for use in stack overflow exceptions.
694    fn reserve_stack() {
695        let result = unsafe { c::SetThreadStackGuarantee(&mut 0x5000) };
696        // Reserving stack space is not critical so we allow it to fail in the released build of libstd.
697        // We still use debug assert here so that CI will test that we haven't made a mistake calling the function.
698        debug_assert_ne!(result, 0, "failed to reserve stack space for exception handling");
699    }
700
    // Vectored exception handler: logs a message on stack overflow, then
    // always defers to the next handler via EXCEPTION_CONTINUE_SEARCH.
    unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> i32 {
        // SAFETY: It's up to the caller (which in this case is the OS) to ensure that `ExceptionInfo` is valid.
        unsafe {
            let rec = &(*(*ExceptionInfo).ExceptionRecord);
            let code = rec.ExceptionCode;

            if code == c::EXCEPTION_STACK_OVERFLOW {
                crate::thread::with_current_name(|name| {
                    let name = name.unwrap_or("<unknown>");
                    let tid = crate::thread::current_os_id();
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                });
            }
            c::EXCEPTION_CONTINUE_SEARCH
        }
    }
717
    pub unsafe fn init() {
        // SAFETY: `vectored_handler` has the correct ABI and is safe to call during exception handling.
        unsafe {
            // NOTE(review): `first == 0` presumably registers the handler at
            // the end of the vectored-handler chain — confirm against the
            // AddVectoredExceptionHandler documentation.
            let result = c::AddVectoredExceptionHandler(0, Some(vectored_handler));
            // Similar to the above, adding the stack overflow handler is allowed to fail
            // but a debug assert is used so CI will still test that it normally works.
            debug_assert!(!result.is_null(), "failed to install exception handler");
        }
        // Set the thread stack guarantee for the main thread.
        reserve_stack();
    }
729
    // Nothing to tear down: the vectored exception handler stays installed.
    pub unsafe fn cleanup() {}
731
    pub unsafe fn make_handler(
        main_thread: bool,
        _thread_name: Option<Box<str>>,
    ) -> super::Handler {
        // The main thread's stack guarantee is set in `init`; every other
        // thread sets its own here. No per-thread allocation is made, so a
        // null handler is always returned.
        if !main_thread {
            reserve_stack();
        }
        super::Handler::null()
    }
741
    // No per-thread state is allocated on Cygwin, so there is nothing to free.
    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
743}