// std/sys/pal/unix/stack_overflow.rs
1#![cfg_attr(test, allow(dead_code))]
2
3pub use self::imp::{cleanup, init};
4use self::imp::{drop_handler, make_handler};
5
6pub struct Handler {
7    data: *mut libc::c_void,
8}
9
10impl Handler {
11    pub unsafe fn new() -> Handler {
12        make_handler(false)
13    }
14
15    fn null() -> Handler {
16        Handler { data: crate::ptr::null_mut() }
17    }
18}
19
20impl Drop for Handler {
21    fn drop(&mut self) {
22        unsafe {
23            drop_handler(self.data);
24        }
25    }
26}
27
28#[cfg(all(
29    not(miri),
30    any(
31        target_os = "linux",
32        target_os = "freebsd",
33        target_os = "hurd",
34        target_os = "macos",
35        target_os = "netbsd",
36        target_os = "openbsd",
37        target_os = "solaris",
38        target_os = "illumos",
39    ),
40))]
41mod thread_info;
42
43// miri doesn't model signals nor stack overflows and this code has some
44// synchronization properties that we don't want to expose to user code,
45// hence we disable it on miri.
46#[cfg(all(
47    not(miri),
48    any(
49        target_os = "linux",
50        target_os = "freebsd",
51        target_os = "hurd",
52        target_os = "macos",
53        target_os = "netbsd",
54        target_os = "openbsd",
55        target_os = "solaris",
56        target_os = "illumos",
57    )
58))]
59mod imp {
60    use libc::{
61        MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
62        SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
63    };
64    #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
65    use libc::{mmap as mmap64, mprotect, munmap};
66    #[cfg(all(target_os = "linux", target_env = "gnu"))]
67    use libc::{mmap64, mprotect, munmap};
68
69    use super::Handler;
70    use super::thread_info::{delete_current_info, set_current_info, with_current_info};
71    use crate::ops::Range;
72    use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
73    use crate::sys::pal::unix::conf;
74    use crate::{io, mem, ptr};
75
76    // Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
77    // (unmapped pages) at the end of every thread's stack, so if a thread ends
78    // up running into the guard page it'll trigger this handler. We want to
79    // detect these cases and print out a helpful error saying that the stack
80    // has overflowed. All other signals, however, should go back to what they
81    // were originally supposed to do.
82    //
83    // This handler currently exists purely to print an informative message
84    // whenever a thread overflows its stack. We then abort to exit and
85    // indicate a crash, but to avoid a misleading SIGSEGV that might lead
86    // users to believe that unsafe code has accessed an invalid pointer; the
87    // SIGSEGV encountered when overflowing the stack is expected and
88    // well-defined.
89    //
90    // If this is not a stack overflow, the handler un-registers itself and
91    // then returns (to allow the original signal to be delivered again).
92    // Returning from this kind of signal handler is technically not defined
93    // to work when reading the POSIX spec strictly, but in practice it turns
94    // out many large systems and all implementations allow returning from a
95    // signal handler to work. For a more detailed explanation see the
96    // comments on #26458.
97    /// SIGSEGV/SIGBUS entry point
98    /// # Safety
99    /// Rust doesn't call this, it *gets called*.
100    #[forbid(unsafe_op_in_unsafe_fn)]
101    unsafe extern "C" fn signal_handler(
102        signum: libc::c_int,
103        info: *mut libc::siginfo_t,
104        _data: *mut libc::c_void,
105    ) {
106        // SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
107        let fault_addr = unsafe { (*info).si_addr().addr() };
108
109        // `with_current_info` expects that the process aborts after it is
110        // called. If the signal was not caused by a memory access, this might
111        // not be true. We detect this by noticing that the `si_addr` field is
112        // zero if the signal is synthetic.
113        if fault_addr != 0 {
114            with_current_info(|thread_info| {
115                // If the faulting address is within the guard page, then we print a
116                // message saying so and abort.
117                if let Some(thread_info) = thread_info
118                    && thread_info.guard_page_range.contains(&fault_addr)
119                {
120                    // Hey you! Yes, you modifying the stack overflow message!
121                    // Please make sure that all functions called here are
122                    // actually async-signal-safe. If they're not, try retrieving
123                    // the information beforehand and storing it in `ThreadInfo`.
124                    // Thank you!
125                    // - says Jonas after having had to watch his carefully
126                    //   written code get made unsound again.
127                    let tid = thread_info.tid;
128                    let name = thread_info.name.as_deref().unwrap_or("<unknown>");
129                    if let Some(mut out) = crate::sys::stdio::panic_output() {
    let _ =
        crate::io::Write::write_fmt(&mut out,
            format_args!("\nthread \'{0}\' ({1}) has overflowed its stack\n",
                name, tid));
};rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
130                    {
    if let Some(mut out) = crate::sys::stdio::panic_output() {
        let _ =
            crate::io::Write::write_fmt(&mut out,
                format_args!("fatal runtime error: {0}, aborting\n",
                    format_args!("stack overflow")));
    };
    crate::process::abort();
};rtabort!("stack overflow");
131                }
132            })
133        }
134
135        // Unregister ourselves by reverting back to the default behavior.
136        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
137        let mut action: sigaction = unsafe { mem::zeroed() };
138        action.sa_sigaction = SIG_DFL;
139        // SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
140        unsafe { sigaction(signum, &action, ptr::null_mut()) };
141
142        // See comment above for why this function returns.
143    }
144
145    static PAGE_SIZE: Atomic<usize> = AtomicUsize::new(0);
146    static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut());
147    static NEED_ALTSTACK: Atomic<bool> = AtomicBool::new(false);
148
149    /// # Safety
150    /// Must be called only once
151    #[forbid(unsafe_op_in_unsafe_fn)]
152    pub unsafe fn init() {
153        PAGE_SIZE.store(conf::page_size(), Ordering::Relaxed);
154
155        let mut guard_page_range = unsafe { install_main_guard() };
156
157        // Even for panic=immediate-abort, installing the guard pages is important for soundness.
158        // That said, we do not care about giving nice stackoverflow messages via our custom
159        // signal handler, just exit early and let the user enjoy the segfault.
160        if falsecfg!(panic = "immediate-abort") {
161            return;
162        }
163
164        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
165        let mut action: sigaction = unsafe { mem::zeroed() };
166        for &signal in &[SIGSEGV, SIGBUS] {
167            // SAFETY: just fetches the current signal handler into action
168            unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
169            // Configure our signal handler if one is not already set.
170            if action.sa_sigaction == SIG_DFL {
171                if !NEED_ALTSTACK.load(Ordering::Relaxed) {
172                    // haven't set up our sigaltstack yet
173                    NEED_ALTSTACK.store(true, Ordering::Release);
174                    let handler = unsafe { make_handler(true) };
175                    MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
176                    mem::forget(handler);
177
178                    if let Some(guard_page_range) = guard_page_range.take() {
179                        set_current_info(guard_page_range);
180                    }
181                }
182
183                action.sa_flags = SA_SIGINFO | SA_ONSTACK;
184                action.sa_sigaction = signal_handler
185                    as unsafe extern "C" fn(i32, *mut libc::siginfo_t, *mut libc::c_void)
186                    as sighandler_t;
187                // SAFETY: only overriding signals if the default is set
188                unsafe { sigaction(signal, &action, ptr::null_mut()) };
189            }
190        }
191    }
192
193    /// # Safety
194    /// Must be called only once
195    #[forbid(unsafe_op_in_unsafe_fn)]
196    pub unsafe fn cleanup() {
197        if falsecfg!(panic = "immediate-abort") {
198            return;
199        }
200        // FIXME: I probably cause more bugs than I'm worth!
201        // see https://github.com/rust-lang/rust/issues/111272
202        unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
203    }
204
205    unsafe fn get_stack() -> libc::stack_t {
206        // OpenBSD requires this flag for stack mapping
207        // otherwise the said mapping will fail as a no-op on most systems
208        // and has a different meaning on FreeBSD
209        #[cfg(any(
210            target_os = "openbsd",
211            target_os = "netbsd",
212            target_os = "linux",
213            target_os = "dragonfly",
214        ))]
215        let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
216        #[cfg(not(any(
217            target_os = "openbsd",
218            target_os = "netbsd",
219            target_os = "linux",
220            target_os = "dragonfly",
221        )))]
222        let flags = MAP_PRIVATE | MAP_ANON;
223
224        let sigstack_size = sigstack_size();
225        let page_size = PAGE_SIZE.load(Ordering::Relaxed);
226
227        let stackp = mmap64(
228            ptr::null_mut(),
229            sigstack_size + page_size,
230            PROT_READ | PROT_WRITE,
231            flags,
232            -1,
233            0,
234        );
235        if stackp == MAP_FAILED {
236            {
    ::core::panicking::panic_fmt(format_args!("failed to allocate an alternative stack: {0}",
            io::Error::last_os_error()));
};panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
237        }
238        let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
239        if guard_result != 0 {
240            {
    ::core::panicking::panic_fmt(format_args!("failed to set up alternative stack guard page: {0}",
            io::Error::last_os_error()));
};panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
241        }
242        let stackp = stackp.add(page_size);
243
244        libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
245    }
246
247    /// # Safety
248    /// Mutates the alternate signal stack
249    #[forbid(unsafe_op_in_unsafe_fn)]
250    pub unsafe fn make_handler(main_thread: bool) -> Handler {
251        if falsecfg!(panic = "immediate-abort") || !NEED_ALTSTACK.load(Ordering::Acquire) {
252            return Handler::null();
253        }
254
255        if !main_thread {
256            if let Some(guard_page_range) = unsafe { current_guard() } {
257                set_current_info(guard_page_range);
258            }
259        }
260
261        // SAFETY: assuming stack_t is zero-initializable
262        let mut stack = unsafe { mem::zeroed() };
263        // SAFETY: reads current stack_t into stack
264        unsafe { sigaltstack(ptr::null(), &mut stack) };
265        // Configure alternate signal stack, if one is not already set.
266        if stack.ss_flags & SS_DISABLE != 0 {
267            // SAFETY: We warned our caller this would happen!
268            unsafe {
269                stack = get_stack();
270                sigaltstack(&stack, ptr::null_mut());
271            }
272            Handler { data: stack.ss_sp as *mut libc::c_void }
273        } else {
274            Handler::null()
275        }
276    }
277
278    /// # Safety
279    /// Must be called
280    /// - only with our handler or nullptr
281    /// - only when done with our altstack
282    /// This disables the alternate signal stack!
283    #[forbid(unsafe_op_in_unsafe_fn)]
284    pub unsafe fn drop_handler(data: *mut libc::c_void) {
285        if !data.is_null() {
286            let sigstack_size = sigstack_size();
287            let page_size = PAGE_SIZE.load(Ordering::Relaxed);
288            let disabling_stack = libc::stack_t {
289                ss_sp: ptr::null_mut(),
290                ss_flags: SS_DISABLE,
291                // Workaround for bug in macOS implementation of sigaltstack
292                // UNIX2003 which returns ENOMEM when disabling a stack while
293                // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
294                // both ss_sp and ss_size should be ignored in this case.
295                ss_size: sigstack_size,
296            };
297            // SAFETY: we warned the caller this disables the alternate signal stack!
298            unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
299            // SAFETY: We know from `get_stackp` that the alternate stack we installed is part of
300            // a mapping that started one page earlier, so walk back a page and unmap from there.
301            unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
302        }
303
304        delete_current_info();
305    }
306
307    /// Modern kernels on modern hardware can have dynamic signal stack sizes.
308    #[cfg(all(any(target_os = "linux", target_os = "android"), not(target_env = "uclibc")))]
309    fn sigstack_size() -> usize {
310        let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
311        // If getauxval couldn't find the entry, it returns 0,
312        // so take the higher of the "constant" and auxval.
313        // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
314        libc::SIGSTKSZ.max(dynamic_sigstksz as _)
315    }
316
317    /// Not all OS support hardware where this is needed.
318    #[cfg(not(all(any(target_os = "linux", target_os = "android"), not(target_env = "uclibc"))))]
319    fn sigstack_size() -> usize {
320        libc::SIGSTKSZ
321    }
322
323    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
324    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
325        let mut current_stack: libc::stack_t = crate::mem::zeroed();
326        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
327        Some(current_stack.ss_sp)
328    }
329
330    #[cfg(target_os = "macos")]
331    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
332        let th = libc::pthread_self();
333        let stackptr = libc::pthread_get_stackaddr_np(th);
334        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
335    }
336
337    #[cfg(target_os = "openbsd")]
338    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
339        let mut current_stack: libc::stack_t = crate::mem::zeroed();
340        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
341
342        let stack_ptr = current_stack.ss_sp;
343        let stackaddr = if libc::pthread_main_np() == 1 {
344            // main thread
345            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
346        } else {
347            // new thread
348            stack_ptr.addr() - current_stack.ss_size
349        };
350        Some(stack_ptr.with_addr(stackaddr))
351    }
352
353    #[cfg(any(
354        target_os = "android",
355        target_os = "freebsd",
356        target_os = "netbsd",
357        target_os = "hurd",
358        target_os = "linux",
359        target_os = "l4re"
360    ))]
361    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
362        let mut ret = None;
363        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
364        if !falsecfg!(target_os = "freebsd") {
365            attr = mem::MaybeUninit::zeroed();
366        }
367        #[cfg(target_os = "freebsd")]
368        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
369        #[cfg(target_os = "freebsd")]
370        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
371        #[cfg(not(target_os = "freebsd"))]
372        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
373        if e == 0 {
374            let mut stackaddr = crate::ptr::null_mut();
375            let mut stacksize = 0;
376            match (&libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr,
                &mut stacksize), &0) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(
377                libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr, &mut stacksize),
378                0
379            );
380            ret = Some(stackaddr);
381        }
382        if e == 0 || falsecfg!(target_os = "freebsd") {
383            match (&libc::pthread_attr_destroy(attr.as_mut_ptr()), &0) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
384        }
385        ret
386    }
387
388    fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
389        let stackptr = unsafe { get_stack_start()? };
390        let stackaddr = stackptr.addr();
391
392        // Ensure stackaddr is page aligned! A parent process might
393        // have reset RLIMIT_STACK to be non-page aligned. The
394        // pthread_attr_getstack() reports the usable stack area
395        // stackaddr < stackaddr + stacksize, so if stackaddr is not
396        // page-aligned, calculate the fix such that stackaddr <
397        // new_page_aligned_stackaddr < stackaddr + stacksize
398        let remainder = stackaddr % page_size;
399        Some(if remainder == 0 {
400            stackptr
401        } else {
402            stackptr.with_addr(stackaddr + page_size - remainder)
403        })
404    }
405
406    #[forbid(unsafe_op_in_unsafe_fn)]
407    unsafe fn install_main_guard() -> Option<Range<usize>> {
408        let page_size = PAGE_SIZE.load(Ordering::Relaxed);
409
410        unsafe {
411            // this way someone on any unix-y OS can check that all these compile
412            if truecfg!(all(target_os = "linux", not(target_env = "musl"))) {
413                install_main_guard_linux(page_size)
414            } else if falsecfg!(all(target_os = "linux", target_env = "musl")) {
415                install_main_guard_linux_musl(page_size)
416            } else if falsecfg!(target_os = "freebsd") {
417                #[cfg(not(target_os = "freebsd"))]
418                return None;
419                // The FreeBSD code cannot be checked on non-BSDs.
420                #[cfg(target_os = "freebsd")]
421                install_main_guard_freebsd(page_size)
422            } else if falsecfg!(any(target_os = "netbsd", target_os = "openbsd")) {
423                install_main_guard_bsds(page_size)
424            } else {
425                install_main_guard_default(page_size)
426            }
427        }
428    }
429
430    #[forbid(unsafe_op_in_unsafe_fn)]
431    unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
432        // See the corresponding conditional in init().
433        // Avoid stack_start_aligned, which makes slow syscalls to read /proc/self/maps
434        if falsecfg!(panic = "immediate-abort") {
435            return None;
436        }
437        // Linux doesn't allocate the whole stack right away, and
438        // the kernel has its own stack-guard mechanism to fault
439        // when growing too close to an existing mapping. If we map
440        // our own guard, then the kernel starts enforcing a rather
441        // large gap above that, rendering much of the possible
442        // stack space useless. See #43052.
443        //
444        // Instead, we'll just note where we expect rlimit to start
445        // faulting, so our handler can report "stack overflow", and
446        // trust that the kernel's own stack guard will work.
447        let stackptr = stack_start_aligned(page_size)?;
448        let stackaddr = stackptr.addr();
449        Some(stackaddr - page_size..stackaddr)
450    }
451
452    #[forbid(unsafe_op_in_unsafe_fn)]
453    unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
454        // For the main thread, the musl's pthread_attr_getstack
455        // returns the current stack size, rather than maximum size
456        // it can eventually grow to. It cannot be used to determine
457        // the position of kernel's stack guard.
458        None
459    }
460
461    #[forbid(unsafe_op_in_unsafe_fn)]
462    #[cfg(target_os = "freebsd")]
463    unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
464        // See the corresponding conditional in install_main_guard_linux().
465        if cfg!(panic = "immediate-abort") {
466            return None;
467        }
468        // FreeBSD's stack autogrows, and optionally includes a guard page
469        // at the bottom. If we try to remap the bottom of the stack
470        // ourselves, FreeBSD's guard page moves upwards. So we'll just use
471        // the builtin guard page.
472        let stackptr = stack_start_aligned(page_size)?;
473        let guardaddr = stackptr.addr();
474        // Technically the number of guard pages is tunable and controlled
475        // by the security.bsd.stack_guard_page sysctl.
476        // By default it is 1, checking once is enough since it is
477        // a boot time config value.
478        static PAGES: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();
479
480        let pages = PAGES.get_or_init(|| {
481            let mut guard: usize = 0;
482            let mut size = size_of_val(&guard);
483            let oid = c"security.bsd.stack_guard_page";
484
485            let r = unsafe {
486                libc::sysctlbyname(
487                    oid.as_ptr(),
488                    (&raw mut guard).cast(),
489                    &raw mut size,
490                    ptr::null_mut(),
491                    0,
492                )
493            };
494            if r == 0 { guard } else { 1 }
495        });
496        Some(guardaddr..guardaddr + pages * page_size)
497    }
498
499    #[forbid(unsafe_op_in_unsafe_fn)]
500    unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
501        // See the corresponding conditional in install_main_guard_linux().
502        if falsecfg!(panic = "immediate-abort") {
503            return None;
504        }
505        // OpenBSD stack already includes a guard page, and stack is
506        // immutable.
507        // NetBSD stack includes the guard page.
508        //
509        // We'll just note where we expect rlimit to start
510        // faulting, so our handler can report "stack overflow", and
511        // trust that the kernel's own stack guard will work.
512        let stackptr = stack_start_aligned(page_size)?;
513        let stackaddr = stackptr.addr();
514        Some(stackaddr - page_size..stackaddr)
515    }
516
517    #[forbid(unsafe_op_in_unsafe_fn)]
518    unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
519        // Reallocate the last page of the stack.
520        // This ensures SIGBUS will be raised on
521        // stack overflow.
522        // Systems which enforce strict PAX MPROTECT do not allow
523        // to mprotect() a mapping with less restrictive permissions
524        // than the initial mmap() used, so we mmap() here with
525        // read/write permissions and only then mprotect() it to
526        // no permissions at all. See issue #50313.
527        let stackptr = stack_start_aligned(page_size)?;
528        let result = unsafe {
529            mmap64(
530                stackptr,
531                page_size,
532                PROT_READ | PROT_WRITE,
533                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
534                -1,
535                0,
536            )
537        };
538        if result != stackptr || result == MAP_FAILED {
539            {
    ::core::panicking::panic_fmt(format_args!("failed to allocate a guard page: {0}",
            io::Error::last_os_error()));
};panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
540        }
541
542        let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
543        if result != 0 {
544            {
    ::core::panicking::panic_fmt(format_args!("failed to protect the guard page: {0}",
            io::Error::last_os_error()));
};panic!("failed to protect the guard page: {}", io::Error::last_os_error());
545        }
546
547        let guardaddr = stackptr.addr();
548
549        Some(guardaddr..guardaddr + page_size)
550    }
551
552    #[cfg(any(
553        target_os = "macos",
554        target_os = "openbsd",
555        target_os = "solaris",
556        target_os = "illumos",
557    ))]
558    // FIXME: I am probably not unsafe.
559    unsafe fn current_guard() -> Option<Range<usize>> {
560        let stackptr = get_stack_start()?;
561        let stackaddr = stackptr.addr();
562        Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
563    }
564
565    #[cfg(any(
566        target_os = "android",
567        target_os = "freebsd",
568        target_os = "hurd",
569        target_os = "linux",
570        target_os = "netbsd",
571        target_os = "l4re"
572    ))]
573    // FIXME: I am probably not unsafe.
574    unsafe fn current_guard() -> Option<Range<usize>> {
575        let mut ret = None;
576
577        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
578        if !falsecfg!(target_os = "freebsd") {
579            attr = mem::MaybeUninit::zeroed();
580        }
581        #[cfg(target_os = "freebsd")]
582        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
583        #[cfg(target_os = "freebsd")]
584        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
585        #[cfg(not(target_os = "freebsd"))]
586        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
587        if e == 0 {
588            let mut guardsize = 0;
589            match (&libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), &0) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), 0);
590            if guardsize == 0 {
591                if falsecfg!(all(target_os = "linux", target_env = "musl")) {
592                    // musl versions before 1.1.19 always reported guard
593                    // size obtained from pthread_attr_get_np as zero.
594                    // Use page size as a fallback.
595                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
596                } else {
597                    { ::core::panicking::panic_fmt(format_args!("there is no guard page")); };panic!("there is no guard page");
598                }
599            }
600            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
601            let mut size = 0;
602            match (&libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size),
        &0) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size), 0);
603
604            let stackaddr = stackptr.addr();
605            ret = if falsecfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
606                Some(stackaddr - guardsize..stackaddr)
607            } else if falsecfg!(all(target_os = "linux", target_env = "musl")) {
608                Some(stackaddr - guardsize..stackaddr)
609            } else if truecfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
610            {
611                // glibc used to include the guard area within the stack, as noted in the BUGS
612                // section of `man pthread_attr_getguardsize`. This has been corrected starting
613                // with glibc 2.27, and in some distro backports, so the guard is now placed at the
614                // end (below) the stack. There's no easy way for us to know which we have at
615                // runtime, so we'll just match any fault in the range right above or below the
616                // stack base to call that fault a stack overflow.
617                Some(stackaddr - guardsize..stackaddr + guardsize)
618            } else {
619                Some(stackaddr..stackaddr + guardsize)
620            };
621        }
622        if e == 0 || falsecfg!(target_os = "freebsd") {
623            match (&libc::pthread_attr_destroy(attr.as_mut_ptr()), &0) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
624        }
625        ret
626    }
627}
628
// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
// several symbols that might lead to rejections from the App Store, namely
// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
//
// This might be overly cautious, though it is also what Swift does (and they
// usually have fewer qualms about forwards compatibility, since the runtime
// is shipped with the OS):
// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
#[cfg(any(
    miri,
    not(any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
        target_os = "cygwin",
    ))
))]
mod imp {
    // No-op fallback: these targets get no stack-overflow detection.
    pub unsafe fn init() {}

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(_main_thread: bool) -> super::Handler {
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}
662
#[cfg(target_os = "cygwin")]
mod imp {
    // Minimal hand-rolled Win32/NT declarations; Cygwin reports stack
    // overflow via a vectored exception handler rather than POSIX signals.
    mod c {
        pub type PVECTORED_EXCEPTION_HANDLER =
            Option<unsafe extern "system" fn(exceptioninfo: *mut EXCEPTION_POINTERS) -> i32>;
        pub type NTSTATUS = i32;
        pub type BOOL = i32;

        unsafe extern "system" {
            pub fn AddVectoredExceptionHandler(
                first: u32,
                handler: PVECTORED_EXCEPTION_HANDLER,
            ) -> *mut core::ffi::c_void;
            pub fn SetThreadStackGuarantee(stacksizeinbytes: *mut u32) -> BOOL;
        }

        pub const EXCEPTION_STACK_OVERFLOW: NTSTATUS = 0xC00000FD_u32 as _;
        pub const EXCEPTION_CONTINUE_SEARCH: i32 = 1i32;

        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_POINTERS {
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            // We don't need this field here
            // pub Context: *mut CONTEXT,
        }
        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_RECORD {
            pub ExceptionCode: NTSTATUS,
            pub ExceptionFlags: u32,
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ExceptionAddress: *mut core::ffi::c_void,
            pub NumberParameters: u32,
            pub ExceptionInformation: [usize; 15],
        }
    }

    /// Reserve stack space for use in stack overflow exceptions.
    fn reserve_stack() {
        let result = unsafe { c::SetThreadStackGuarantee(&mut 0x5000) };
        // Reserving stack space is not critical so we allow it to fail in the released build of libstd.
        // We still use debug assert here so that CI will test that we haven't made a mistake calling the function.
        debug_assert_ne!(result, 0, "failed to reserve stack space for exception handling");
    }

    unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> i32 {
        // SAFETY: It's up to the caller (which in this case is the OS) to ensure that `ExceptionInfo` is valid.
        unsafe {
            let rec = &(*(*ExceptionInfo).ExceptionRecord);
            let code = rec.ExceptionCode;

            if code == c::EXCEPTION_STACK_OVERFLOW {
                crate::thread::with_current_name(|name| {
                    let name = name.unwrap_or("<unknown>");
                    let tid = crate::thread::current_os_id();
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                });
            }
            c::EXCEPTION_CONTINUE_SEARCH
        }
    }

    pub unsafe fn init() {
        // SAFETY: `vectored_handler` has the correct ABI and is safe to call during exception handling.
        unsafe {
            let result = c::AddVectoredExceptionHandler(0, Some(vectored_handler));
            // Similar to the above, adding the stack overflow handler is allowed to fail
            // but a debug assert is used so CI will still test that it normally works.
            debug_assert!(!result.is_null(), "failed to install exception handler");
        }
        // Set the thread stack guarantee for the main thread.
        reserve_stack();
    }

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(main_thread: bool) -> super::Handler {
        if !main_thread {
            reserve_stack();
        }
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}