1#![cfg_attr(test, allow(dead_code))]
23pub use self::imp::{cleanup, init};
4use self::imp::{drop_handler, make_handler};
56pub struct Handler {
7 data: *mut libc::c_void,
8}
910impl Handler {
11pub unsafe fn new() -> Handler {
12make_handler(false)
13 }
1415fn null() -> Handler {
16Handler { data: crate::ptr::null_mut() }
17 }
18}
1920impl Dropfor Handler {
21fn drop(&mut self) {
22unsafe {
23drop_handler(self.data);
24 }
25 }
26}
2728#[cfg(all(
29 not(miri),
30 any(
31 target_os = "linux",
32 target_os = "freebsd",
33 target_os = "hurd",
34 target_os = "macos",
35 target_os = "netbsd",
36 target_os = "openbsd",
37 target_os = "solaris",
38 target_os = "illumos",
39 ),
40))]
41mod thread_info;
4243// miri doesn't model signals nor stack overflows and this code has some
44// synchronization properties that we don't want to expose to user code,
45// hence we disable it on miri.
46#[cfg(all(
47 not(miri),
48 any(
49 target_os = "linux",
50 target_os = "freebsd",
51 target_os = "hurd",
52 target_os = "macos",
53 target_os = "netbsd",
54 target_os = "openbsd",
55 target_os = "solaris",
56 target_os = "illumos",
57 )
58))]
59mod imp {
60use libc::{
61MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
62SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
63 };
64#[cfg(not(all(target_os = "linux", target_env = "gnu")))]
65use libc::{mmap as mmap64, mprotect, munmap};
66#[cfg(all(target_os = "linux", target_env = "gnu"))]
67use libc::{mmap64, mprotect, munmap};
6869use super::Handler;
70use super::thread_info::{delete_current_info, set_current_info, with_current_info};
71use crate::ops::Range;
72use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
73use crate::sys::pal::unix::conf;
74use crate::{io, mem, ptr};
7576// Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
77 // (unmapped pages) at the end of every thread's stack, so if a thread ends
78 // up running into the guard page it'll trigger this handler. We want to
79 // detect these cases and print out a helpful error saying that the stack
80 // has overflowed. All other signals, however, should go back to what they
81 // were originally supposed to do.
82 //
83 // This handler currently exists purely to print an informative message
84 // whenever a thread overflows its stack. We then abort to exit and
85 // indicate a crash, but to avoid a misleading SIGSEGV that might lead
86 // users to believe that unsafe code has accessed an invalid pointer; the
87 // SIGSEGV encountered when overflowing the stack is expected and
88 // well-defined.
89 //
90 // If this is not a stack overflow, the handler un-registers itself and
91 // then returns (to allow the original signal to be delivered again).
92 // Returning from this kind of signal handler is technically not defined
93 // to work when reading the POSIX spec strictly, but in practice it turns
94 // out many large systems and all implementations allow returning from a
95 // signal handler to work. For a more detailed explanation see the
96 // comments on #26458.
97/// SIGSEGV/SIGBUS entry point
98 /// # Safety
99 /// Rust doesn't call this, it *gets called*.
100#[forbid(unsafe_op_in_unsafe_fn)]
101unsafe extern "C" fn signal_handler(
102 signum: libc::c_int,
103 info: *mut libc::siginfo_t,
104 _data: *mut libc::c_void,
105 ) {
106// SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
107let fault_addr = unsafe { (*info).si_addr().addr() };
108109// `with_current_info` expects that the process aborts after it is
110 // called. If the signal was not caused by a memory access, this might
111 // not be true. We detect this by noticing that the `si_addr` field is
112 // zero if the signal is synthetic.
113if fault_addr != 0 {
114with_current_info(|thread_info| {
115// If the faulting address is within the guard page, then we print a
116 // message saying so and abort.
117if let Some(thread_info) = thread_info118 && thread_info.guard_page_range.contains(&fault_addr)
119 {
120// Hey you! Yes, you modifying the stack overflow message!
121 // Please make sure that all functions called here are
122 // actually async-signal-safe. If they're not, try retrieving
123 // the information beforehand and storing it in `ThreadInfo`.
124 // Thank you!
125 // - says Jonas after having had to watch his carefully
126 // written code get made unsound again.
127let tid = thread_info.tid;
128let name = thread_info.name.as_deref().unwrap_or("<unknown>");
129if let Some(mut out) = crate::sys::stdio::panic_output() {
let _ =
crate::io::Write::write_fmt(&mut out,
format_args!("\nthread \'{0}\' ({1}) has overflowed its stack\n",
name, tid));
};rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
130{
if let Some(mut out) = crate::sys::stdio::panic_output() {
let _ =
crate::io::Write::write_fmt(&mut out,
format_args!("fatal runtime error: {0}, aborting\n",
format_args!("stack overflow")));
};
crate::process::abort();
};rtabort!("stack overflow");
131 }
132 })
133 }
134135// Unregister ourselves by reverting back to the default behavior.
136 // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
137let mut action: sigaction = unsafe { mem::zeroed() };
138action.sa_sigaction = SIG_DFL;
139// SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
140unsafe { sigaction(signum, &action, ptr::null_mut()) };
141142// See comment above for why this function returns.
143}
144145static PAGE_SIZE: Atomic<usize> = AtomicUsize::new(0);
146static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut());
147static NEED_ALTSTACK: Atomic<bool> = AtomicBool::new(false);
148149/// # Safety
150 /// Must be called only once
151#[forbid(unsafe_op_in_unsafe_fn)]
152pub unsafe fn init() {
153PAGE_SIZE.store(conf::page_size(), Ordering::Relaxed);
154155let mut guard_page_range = unsafe { install_main_guard() };
156157// Even for panic=immediate-abort, installing the guard pages is important for soundness.
158 // That said, we do not care about giving nice stackoverflow messages via our custom
159 // signal handler, just exit early and let the user enjoy the segfault.
160if falsecfg!(panic = "immediate-abort") {
161return;
162 }
163164// SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
165let mut action: sigaction = unsafe { mem::zeroed() };
166for &signal in &[SIGSEGV, SIGBUS] {
167// SAFETY: just fetches the current signal handler into action
168unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
169// Configure our signal handler if one is not already set.
170if action.sa_sigaction == SIG_DFL {
171if !NEED_ALTSTACK.load(Ordering::Relaxed) {
172// haven't set up our sigaltstack yet
173NEED_ALTSTACK.store(true, Ordering::Release);
174let handler = unsafe { make_handler(true) };
175 MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
176 mem::forget(handler);
177178if let Some(guard_page_range) = guard_page_range.take() {
179 set_current_info(guard_page_range);
180 }
181 }
182183 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
184 action.sa_sigaction = signal_handler
185as unsafe extern "C" fn(i32, *mut libc::siginfo_t, *mut libc::c_void)
186as sighandler_t;
187// SAFETY: only overriding signals if the default is set
188unsafe { sigaction(signal, &action, ptr::null_mut()) };
189 }
190 }
191 }
192193/// # Safety
194 /// Must be called only once
195#[forbid(unsafe_op_in_unsafe_fn)]
196pub unsafe fn cleanup() {
197if falsecfg!(panic = "immediate-abort") {
198return;
199 }
200// FIXME: I probably cause more bugs than I'm worth!
201 // see https://github.com/rust-lang/rust/issues/111272
202unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
203 }
204205unsafe fn get_stack() -> libc::stack_t {
206// OpenBSD requires this flag for stack mapping
207 // otherwise the said mapping will fail as a no-op on most systems
208 // and has a different meaning on FreeBSD
209#[cfg(any(
210 target_os = "openbsd",
211 target_os = "netbsd",
212 target_os = "linux",
213 target_os = "dragonfly",
214 ))]
215let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
216#[cfg(not(any(
217 target_os = "openbsd",
218 target_os = "netbsd",
219 target_os = "linux",
220 target_os = "dragonfly",
221 )))]
222let flags = MAP_PRIVATE | MAP_ANON;
223224let sigstack_size = sigstack_size();
225let page_size = PAGE_SIZE.load(Ordering::Relaxed);
226227let stackp = mmap64(
228 ptr::null_mut(),
229sigstack_size + page_size,
230PROT_READ | PROT_WRITE,
231flags,
232 -1,
2330,
234 );
235if stackp == MAP_FAILED {
236{
::core::panicking::panic_fmt(format_args!("failed to allocate an alternative stack: {0}",
io::Error::last_os_error()));
};panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
237 }
238let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
239if guard_result != 0 {
240{
::core::panicking::panic_fmt(format_args!("failed to set up alternative stack guard page: {0}",
io::Error::last_os_error()));
};panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
241 }
242let stackp = stackp.add(page_size);
243244 libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
245 }
246247/// # Safety
248 /// Mutates the alternate signal stack
249#[forbid(unsafe_op_in_unsafe_fn)]
250pub unsafe fn make_handler(main_thread: bool) -> Handler {
251if falsecfg!(panic = "immediate-abort") || !NEED_ALTSTACK.load(Ordering::Acquire) {
252return Handler::null();
253 }
254255if !main_thread {
256if let Some(guard_page_range) = unsafe { current_guard() } {
257set_current_info(guard_page_range);
258 }
259 }
260261// SAFETY: assuming stack_t is zero-initializable
262let mut stack = unsafe { mem::zeroed() };
263// SAFETY: reads current stack_t into stack
264unsafe { sigaltstack(ptr::null(), &mut stack) };
265// Configure alternate signal stack, if one is not already set.
266if stack.ss_flags & SS_DISABLE != 0 {
267// SAFETY: We warned our caller this would happen!
268unsafe {
269stack = get_stack();
270sigaltstack(&stack, ptr::null_mut());
271 }
272Handler { data: stack.ss_sp as *mut libc::c_void }
273 } else {
274Handler::null()
275 }
276 }
277278/// # Safety
279 /// Must be called
280 /// - only with our handler or nullptr
281 /// - only when done with our altstack
282 /// This disables the alternate signal stack!
283#[forbid(unsafe_op_in_unsafe_fn)]
284pub unsafe fn drop_handler(data: *mut libc::c_void) {
285if !data.is_null() {
286let sigstack_size = sigstack_size();
287let page_size = PAGE_SIZE.load(Ordering::Relaxed);
288let disabling_stack = libc::stack_t {
289 ss_sp: ptr::null_mut(),
290 ss_flags: SS_DISABLE,
291// Workaround for bug in macOS implementation of sigaltstack
292 // UNIX2003 which returns ENOMEM when disabling a stack while
293 // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
294 // both ss_sp and ss_size should be ignored in this case.
295ss_size: sigstack_size,
296 };
297// SAFETY: we warned the caller this disables the alternate signal stack!
298unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
299// SAFETY: We know from `get_stackp` that the alternate stack we installed is part of
300 // a mapping that started one page earlier, so walk back a page and unmap from there.
301unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
302 }
303304delete_current_info();
305 }
306307/// Modern kernels on modern hardware can have dynamic signal stack sizes.
308#[cfg(all(any(target_os = "linux", target_os = "android"), not(target_env = "uclibc")))]
309fn sigstack_size() -> usize {
310let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
311// If getauxval couldn't find the entry, it returns 0,
312 // so take the higher of the "constant" and auxval.
313 // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
314libc::SIGSTKSZ.max(dynamic_sigstkszas _)
315 }
316317/// Not all OS support hardware where this is needed.
318#[cfg(not(all(any(target_os = "linux", target_os = "android"), not(target_env = "uclibc"))))]
319fn sigstack_size() -> usize {
320 libc::SIGSTKSZ
321 }
322323#[cfg(any(target_os = "solaris", target_os = "illumos"))]
324unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
325let mut current_stack: libc::stack_t = crate::mem::zeroed();
326assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
327Some(current_stack.ss_sp)
328 }
329330#[cfg(target_os = "macos")]
331unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
332let th = libc::pthread_self();
333let stackptr = libc::pthread_get_stackaddr_np(th);
334Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
335 }
336337#[cfg(target_os = "openbsd")]
338unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
339let mut current_stack: libc::stack_t = crate::mem::zeroed();
340assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
341342let stack_ptr = current_stack.ss_sp;
343let stackaddr = if libc::pthread_main_np() == 1 {
344// main thread
345stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
346 } else {
347// new thread
348stack_ptr.addr() - current_stack.ss_size
349 };
350Some(stack_ptr.with_addr(stackaddr))
351 }
352353#[cfg(any(
354 target_os = "android",
355 target_os = "freebsd",
356 target_os = "netbsd",
357 target_os = "hurd",
358 target_os = "linux",
359 target_os = "l4re"
360))]
361unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
362let mut ret = None;
363let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
364if !falsecfg!(target_os = "freebsd") {
365attr = mem::MaybeUninit::zeroed();
366 }
367#[cfg(target_os = "freebsd")]
368assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
369#[cfg(target_os = "freebsd")]
370let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
371#[cfg(not(target_os = "freebsd"))]
372let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
373if e == 0 {
374let mut stackaddr = crate::ptr::null_mut();
375let mut stacksize = 0;
376match (&libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr,
&mut stacksize), &0) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(
377 libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr, &mut stacksize),
3780
379);
380ret = Some(stackaddr);
381 }
382if e == 0 || falsecfg!(target_os = "freebsd") {
383match (&libc::pthread_attr_destroy(attr.as_mut_ptr()), &0) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
384 }
385ret386 }
387388fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
389let stackptr = unsafe { get_stack_start()? };
390let stackaddr = stackptr.addr();
391392// Ensure stackaddr is page aligned! A parent process might
393 // have reset RLIMIT_STACK to be non-page aligned. The
394 // pthread_attr_getstack() reports the usable stack area
395 // stackaddr < stackaddr + stacksize, so if stackaddr is not
396 // page-aligned, calculate the fix such that stackaddr <
397 // new_page_aligned_stackaddr < stackaddr + stacksize
398let remainder = stackaddr % page_size;
399Some(if remainder == 0 {
400stackptr401 } else {
402stackptr.with_addr(stackaddr + page_size - remainder)
403 })
404 }
405406#[forbid(unsafe_op_in_unsafe_fn)]
407unsafe fn install_main_guard() -> Option<Range<usize>> {
408let page_size = PAGE_SIZE.load(Ordering::Relaxed);
409410unsafe {
411// this way someone on any unix-y OS can check that all these compile
412if truecfg!(all(target_os = "linux", not(target_env = "musl"))) {
413install_main_guard_linux(page_size)
414 } else if falsecfg!(all(target_os = "linux", target_env = "musl")) {
415install_main_guard_linux_musl(page_size)
416 } else if falsecfg!(target_os = "freebsd") {
417#[cfg(not(target_os = "freebsd"))]
418return None;
419// The FreeBSD code cannot be checked on non-BSDs.
420#[cfg(target_os = "freebsd")]
421install_main_guard_freebsd(page_size)
422 } else if falsecfg!(any(target_os = "netbsd", target_os = "openbsd")) {
423install_main_guard_bsds(page_size)
424 } else {
425install_main_guard_default(page_size)
426 }
427 }
428 }
429430#[forbid(unsafe_op_in_unsafe_fn)]
431unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
432// See the corresponding conditional in init().
433 // Avoid stack_start_aligned, which makes slow syscalls to read /proc/self/maps
434if falsecfg!(panic = "immediate-abort") {
435return None;
436 }
437// Linux doesn't allocate the whole stack right away, and
438 // the kernel has its own stack-guard mechanism to fault
439 // when growing too close to an existing mapping. If we map
440 // our own guard, then the kernel starts enforcing a rather
441 // large gap above that, rendering much of the possible
442 // stack space useless. See #43052.
443 //
444 // Instead, we'll just note where we expect rlimit to start
445 // faulting, so our handler can report "stack overflow", and
446 // trust that the kernel's own stack guard will work.
447let stackptr = stack_start_aligned(page_size)?;
448let stackaddr = stackptr.addr();
449Some(stackaddr - page_size..stackaddr)
450 }
451452#[forbid(unsafe_op_in_unsafe_fn)]
453unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
454// For the main thread, the musl's pthread_attr_getstack
455 // returns the current stack size, rather than maximum size
456 // it can eventually grow to. It cannot be used to determine
457 // the position of kernel's stack guard.
458None459 }
460461#[forbid(unsafe_op_in_unsafe_fn)]
462 #[cfg(target_os = "freebsd")]
463unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
464// See the corresponding conditional in install_main_guard_linux().
465if cfg!(panic = "immediate-abort") {
466return None;
467 }
468// FreeBSD's stack autogrows, and optionally includes a guard page
469 // at the bottom. If we try to remap the bottom of the stack
470 // ourselves, FreeBSD's guard page moves upwards. So we'll just use
471 // the builtin guard page.
472let stackptr = stack_start_aligned(page_size)?;
473let guardaddr = stackptr.addr();
474// Technically the number of guard pages is tunable and controlled
475 // by the security.bsd.stack_guard_page sysctl.
476 // By default it is 1, checking once is enough since it is
477 // a boot time config value.
478static PAGES: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();
479480let pages = PAGES.get_or_init(|| {
481let mut guard: usize = 0;
482let mut size = size_of_val(&guard);
483let oid = c"security.bsd.stack_guard_page";
484485let r = unsafe {
486 libc::sysctlbyname(
487 oid.as_ptr(),
488 (&raw mut guard).cast(),
489&raw mut size,
490 ptr::null_mut(),
4910,
492 )
493 };
494if r == 0 { guard } else { 1 }
495 });
496Some(guardaddr..guardaddr + pages * page_size)
497 }
498499#[forbid(unsafe_op_in_unsafe_fn)]
500unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
501// See the corresponding conditional in install_main_guard_linux().
502if falsecfg!(panic = "immediate-abort") {
503return None;
504 }
505// OpenBSD stack already includes a guard page, and stack is
506 // immutable.
507 // NetBSD stack includes the guard page.
508 //
509 // We'll just note where we expect rlimit to start
510 // faulting, so our handler can report "stack overflow", and
511 // trust that the kernel's own stack guard will work.
512let stackptr = stack_start_aligned(page_size)?;
513let stackaddr = stackptr.addr();
514Some(stackaddr - page_size..stackaddr)
515 }
516517#[forbid(unsafe_op_in_unsafe_fn)]
518unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
519// Reallocate the last page of the stack.
520 // This ensures SIGBUS will be raised on
521 // stack overflow.
522 // Systems which enforce strict PAX MPROTECT do not allow
523 // to mprotect() a mapping with less restrictive permissions
524 // than the initial mmap() used, so we mmap() here with
525 // read/write permissions and only then mprotect() it to
526 // no permissions at all. See issue #50313.
527let stackptr = stack_start_aligned(page_size)?;
528let result = unsafe {
529mmap64(
530stackptr,
531page_size,
532PROT_READ | PROT_WRITE,
533MAP_PRIVATE | MAP_ANON | MAP_FIXED,
534 -1,
5350,
536 )
537 };
538if result != stackptr || result == MAP_FAILED {
539{
::core::panicking::panic_fmt(format_args!("failed to allocate a guard page: {0}",
io::Error::last_os_error()));
};panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
540 }
541542let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
543if result != 0 {
544{
::core::panicking::panic_fmt(format_args!("failed to protect the guard page: {0}",
io::Error::last_os_error()));
};panic!("failed to protect the guard page: {}", io::Error::last_os_error());
545 }
546547let guardaddr = stackptr.addr();
548549Some(guardaddr..guardaddr + page_size)
550 }
551552#[cfg(any(
553 target_os = "macos",
554 target_os = "openbsd",
555 target_os = "solaris",
556 target_os = "illumos",
557 ))]
558// FIXME: I am probably not unsafe.
559unsafe fn current_guard() -> Option<Range<usize>> {
560let stackptr = get_stack_start()?;
561let stackaddr = stackptr.addr();
562Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
563 }
564565#[cfg(any(
566 target_os = "android",
567 target_os = "freebsd",
568 target_os = "hurd",
569 target_os = "linux",
570 target_os = "netbsd",
571 target_os = "l4re"
572))]
573// FIXME: I am probably not unsafe.
574unsafe fn current_guard() -> Option<Range<usize>> {
575let mut ret = None;
576577let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
578if !falsecfg!(target_os = "freebsd") {
579attr = mem::MaybeUninit::zeroed();
580 }
581#[cfg(target_os = "freebsd")]
582assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
583#[cfg(target_os = "freebsd")]
584let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
585#[cfg(not(target_os = "freebsd"))]
586let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
587if e == 0 {
588let mut guardsize = 0;
589match (&libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), &0) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), 0);
590if guardsize == 0 {
591if falsecfg!(all(target_os = "linux", target_env = "musl")) {
592// musl versions before 1.1.19 always reported guard
593 // size obtained from pthread_attr_get_np as zero.
594 // Use page size as a fallback.
595guardsize = PAGE_SIZE.load(Ordering::Relaxed);
596 } else {
597{ ::core::panicking::panic_fmt(format_args!("there is no guard page")); };panic!("there is no guard page");
598 }
599 }
600let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
601let mut size = 0;
602match (&libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size),
&0) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size), 0);
603604let stackaddr = stackptr.addr();
605ret = if falsecfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
606Some(stackaddr - guardsize..stackaddr)
607 } else if falsecfg!(all(target_os = "linux", target_env = "musl")) {
608Some(stackaddr - guardsize..stackaddr)
609 } else if truecfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))610 {
611// glibc used to include the guard area within the stack, as noted in the BUGS
612 // section of `man pthread_attr_getguardsize`. This has been corrected starting
613 // with glibc 2.27, and in some distro backports, so the guard is now placed at the
614 // end (below) the stack. There's no easy way for us to know which we have at
615 // runtime, so we'll just match any fault in the range right above or below the
616 // stack base to call that fault a stack overflow.
617Some(stackaddr - guardsize..stackaddr + guardsize)
618 } else {
619Some(stackaddr..stackaddr + guardsize)
620 };
621 }
622if e == 0 || falsecfg!(target_os = "freebsd") {
623match (&libc::pthread_attr_destroy(attr.as_mut_ptr()), &0) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
624 }
625ret626 }
627}
// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
// several symbols that might lead to rejections from the App Store, namely
// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
//
// This might be overly cautious, though it is also what Swift does (and they
// usually have fewer qualms about forwards compatibility, since the runtime
// is shipped with the OS):
// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
#[cfg(any(
    miri,
    not(any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
        target_os = "cygwin",
    ))
))]
mod imp {
    // No-op fallback: these targets get no stack-overflow detection, so every
    // entry point does nothing and handlers are always null.
    pub unsafe fn init() {}

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(_main_thread: bool) -> super::Handler {
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}
#[cfg(target_os = "cygwin")]
mod imp {
    // Hand-written bindings for the small slice of the Windows exception API
    // that Cygwin exposes.
    mod c {
        pub type PVECTORED_EXCEPTION_HANDLER =
            Option<unsafe extern "system" fn(exceptioninfo: *mut EXCEPTION_POINTERS) -> i32>;
        pub type NTSTATUS = i32;
        pub type BOOL = i32;

        unsafe extern "system" {
            pub fn AddVectoredExceptionHandler(
                first: u32,
                handler: PVECTORED_EXCEPTION_HANDLER,
            ) -> *mut core::ffi::c_void;
            pub fn SetThreadStackGuarantee(stacksizeinbytes: *mut u32) -> BOOL;
        }

        pub const EXCEPTION_STACK_OVERFLOW: NTSTATUS = 0xC00000FD_u32 as _;
        pub const EXCEPTION_CONTINUE_SEARCH: i32 = 1i32;

        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_POINTERS {
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            // We don't need this field here
            // pub Context: *mut CONTEXT,
        }
        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_RECORD {
            pub ExceptionCode: NTSTATUS,
            pub ExceptionFlags: u32,
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ExceptionAddress: *mut core::ffi::c_void,
            pub NumberParameters: u32,
            pub ExceptionInformation: [usize; 15],
        }
    }

    /// Reserve stack space for use in stack overflow exceptions.
    fn reserve_stack() {
        let result = unsafe { c::SetThreadStackGuarantee(&mut 0x5000) };
        // Reserving stack space is not critical so we allow it to fail in the released build of libstd.
        // We still use debug assert here so that CI will test that we haven't made a mistake calling the function.
        debug_assert_ne!(result, 0, "failed to reserve stack space for exception handling");
    }

    unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> i32 {
        // SAFETY: It's up to the caller (which in this case is the OS) to ensure that `ExceptionInfo` is valid.
        unsafe {
            let rec = &(*(*ExceptionInfo).ExceptionRecord);
            let code = rec.ExceptionCode;

            if code == c::EXCEPTION_STACK_OVERFLOW {
                crate::thread::with_current_name(|name| {
                    let name = name.unwrap_or("<unknown>");
                    let tid = crate::thread::current_os_id();
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                });
            }
            // Let the OS continue searching for the real handler.
            c::EXCEPTION_CONTINUE_SEARCH
        }
    }

    pub unsafe fn init() {
        // SAFETY: `vectored_handler` has the correct ABI and is safe to call during exception handling.
        unsafe {
            let result = c::AddVectoredExceptionHandler(0, Some(vectored_handler));
            // Similar to the above, adding the stack overflow handler is allowed to fail
            // but a debug assert is used so CI will still test that it normally works.
            debug_assert!(!result.is_null(), "failed to install exception handler");
        }
        // Set the thread stack guarantee for the main thread.
        reserve_stack();
    }

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(main_thread: bool) -> super::Handler {
        if !main_thread {
            reserve_stack();
        }
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}