//! POSIX (Unix) thread support for `std` — `std/sys/thread/unix.rs`.

#[cfg(not(any(
    target_env = "newlib",
    target_os = "l4re",
    target_os = "emscripten",
    target_os = "redox",
    target_os = "hurd",
    target_os = "aix",
    target_os = "wasi",
)))]
use crate::ffi::CStr;
use crate::mem::{self, DropGuard, ManuallyDrop};
use crate::num::NonZero;
#[cfg(all(target_os = "linux", target_env = "gnu"))]
use crate::sys::weak::dlsym;
#[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto",))]
use crate::sys::weak::weak;
use crate::thread::ThreadInit;
use crate::time::Duration;
use crate::{cmp, io, ptr, sys};
// Default minimum stack size in bytes. Most platforms get 2 MiB; constrained
// targets override it below.
#[cfg(not(any(
    target_os = "l4re",
    target_os = "vxworks",
    target_os = "espidf",
    target_os = "nuttx"
)))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
#[cfg(target_os = "l4re")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024;
#[cfg(target_os = "vxworks")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024;
#[cfg(any(target_os = "espidf", target_os = "nuttx"))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF/NuttX menuconfig system should be used
33
34pub struct Thread {
35    id: libc::pthread_t,
36}
37
38// Some platforms may have pthread_t as a pointer in which case we still want
39// a thread to be Send/Sync
40unsafe impl Send for Thread {}
41unsafe impl Sync for Thread {}
42
43impl Thread {
44    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
45    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
46    pub unsafe fn new(stack: usize, init: Box<ThreadInit>) -> io::Result<Thread> {
47        let data = init;
48        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
49        match (&libc::pthread_attr_init(attr.as_mut_ptr()), &0) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
50        let mut attr = DropGuard::new(&mut attr, |attr| {
51            match (&libc::pthread_attr_destroy(attr.as_mut_ptr()), &0) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
}assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0)
52        });
53
54        #[cfg(any(target_os = "espidf", target_os = "nuttx"))]
55        if stack > 0 {
56            // Only set the stack if a non-zero value is passed
57            // 0 is used as an indication that the default stack size configured in the ESP-IDF/NuttX menuconfig system should be used
58            assert_eq!(
59                libc::pthread_attr_setstacksize(
60                    attr.as_mut_ptr(),
61                    cmp::max(stack, min_stack_size(attr.as_ptr()))
62                ),
63                0
64            );
65        }
66
67        #[cfg(not(any(target_os = "espidf", target_os = "nuttx")))]
68        {
69            let stack_size = cmp::max(stack, min_stack_size(attr.as_ptr()));
70
71            match libc::pthread_attr_setstacksize(attr.as_mut_ptr(), stack_size) {
72                0 => {}
73                n => {
74                    match (&n, &libc::EINVAL) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(n, libc::EINVAL);
75                    // EINVAL means |stack_size| is either too small or not a
76                    // multiple of the system page size. Because it's definitely
77                    // >= PTHREAD_STACK_MIN, it must be an alignment issue.
78                    // Round up to the nearest page and try again.
79                    let page_size = sys::pal::conf::page_size();
80                    let stack_size =
81                        (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);
82
83                    // Some libc implementations, e.g. musl, place an upper bound
84                    // on the stack size, in which case we can only gracefully return
85                    // an error here.
86                    if libc::pthread_attr_setstacksize(attr.as_mut_ptr(), stack_size) != 0 {
87                        return Err(crate::hint::must_use(crate::io::Error::from_static_message(const {
                &crate::io::SimpleMessage {
                        kind: io::ErrorKind::InvalidInput,
                        message: "invalid stack size",
                    }
            }))io::const_error!(
88                            io::ErrorKind::InvalidInput,
89                            "invalid stack size"
90                        ));
91                    }
92                }
93            };
94        }
95
96        let data = Box::into_raw(data);
97        let mut native: libc::pthread_t = mem::zeroed();
98        let ret = libc::pthread_create(&mut native, attr.as_ptr(), thread_start, data as *mut _);
99        return if ret == 0 {
100            Ok(Thread { id: native })
101        } else {
102            // The thread failed to start and as a result `data` was not consumed.
103            // Therefore, it is safe to reconstruct the box so that it gets deallocated.
104            drop(Box::from_raw(data));
105            Err(io::Error::from_raw_os_error(ret))
106        };
107
108        extern "C" fn thread_start(data: *mut libc::c_void) -> *mut libc::c_void {
109            unsafe {
110                // SAFETY: we are simply recreating the box that was leaked earlier.
111                let init = Box::from_raw(data as *mut ThreadInit);
112                let rust_start = init.init();
113
114                // Now that the thread information is set, set up our stack
115                // overflow handler.
116                let _handler = sys::stack_overflow::Handler::new();
117
118                rust_start();
119            }
120            ptr::null_mut()
121        }
122    }
123
124    pub fn join(self) {
125        let id = self.into_id();
126        let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) };
127        if !(ret == 0) {
    {
        ::core::panicking::panic_fmt(format_args!("failed to join thread: {0}",
                io::Error::from_raw_os_error(ret)));
    }
};assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
128    }
129
130    #[cfg(not(target_os = "wasi"))]
131    pub fn id(&self) -> libc::pthread_t {
132        self.id
133    }
134
135    pub fn into_id(self) -> libc::pthread_t {
136        ManuallyDrop::new(self).id
137    }
138}
139
140impl Drop for Thread {
141    fn drop(&mut self) {
142        let ret = unsafe { libc::pthread_detach(self.id) };
143        if true {
    match (&ret, &0) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(ret, 0);
144    }
145}
146
147pub fn available_parallelism() -> io::Result<NonZero<usize>> {
148    cfg_select! {
149        any(
150            target_os = "android",
151            target_os = "emscripten",
152            target_os = "fuchsia",
153            target_os = "hurd",
154            target_os = "linux",
155            target_os = "aix",
156            target_vendor = "apple",
157            target_os = "cygwin",
158            target_os = "wasi",
159        ) => {
160            #[allow(unused_assignments)]
161            #[allow(unused_mut)]
162            let mut quota = usize::MAX;
163
164            #[cfg(any(target_os = "android", target_os = "linux"))]
165            {
166                quota = cgroups::quota().max(1);
167                let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
168                unsafe {
169                    if libc::sched_getaffinity(0, size_of::<libc::cpu_set_t>(), &mut set) == 0 {
170                        let count = libc::CPU_COUNT(&set) as usize;
171                        let count = count.min(quota);
172
173                        // According to sched_getaffinity's API it should always be non-zero, but
174                        // some old MIPS kernels were buggy and zero-initialized the mask if
175                        // none was explicitly set.
176                        // In that case we use the sysconf fallback.
177                        if let Some(count) = NonZero::new(count) {
178                            return Ok(count)
179                        }
180                    }
181                }
182            }
183            match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
184                -1 => Err(io::Error::last_os_error()),
185                0 => Err(io::Error::UNKNOWN_THREAD_COUNT),
186                cpus => {
187                    let count = cpus as usize;
188                    // Cover the unusual situation where we were able to get the quota but not the affinity mask
189                    let count = count.min(quota);
190                    Ok(unsafe { NonZero::new_unchecked(count) })
191                }
192            }
193        }
194        any(
195           target_os = "freebsd",
196           target_os = "dragonfly",
197           target_os = "openbsd",
198           target_os = "netbsd",
199        ) => {
200            use crate::ptr;
201
202            #[cfg(target_os = "freebsd")]
203            {
204                let mut set: libc::cpuset_t = unsafe { mem::zeroed() };
205                unsafe {
206                    if libc::cpuset_getaffinity(
207                        libc::CPU_LEVEL_WHICH,
208                        libc::CPU_WHICH_PID,
209                        -1,
210                        size_of::<libc::cpuset_t>(),
211                        &mut set,
212                    ) == 0 {
213                        let count = libc::CPU_COUNT(&set) as usize;
214                        if count > 0 {
215                            return Ok(NonZero::new_unchecked(count));
216                        }
217                    }
218                }
219            }
220
221            #[cfg(target_os = "netbsd")]
222            {
223                unsafe {
224                    let set = libc::_cpuset_create();
225                    if !set.is_null() {
226                        let mut count: usize = 0;
227                        if libc::pthread_getaffinity_np(libc::pthread_self(), libc::_cpuset_size(set), set) == 0 {
228                            for i in 0..libc::cpuid_t::MAX {
229                                match libc::_cpuset_isset(i, set) {
230                                    -1 => break,
231                                    0 => continue,
232                                    _ => count = count + 1,
233                                }
234                            }
235                        }
236                        libc::_cpuset_destroy(set);
237                        if let Some(count) = NonZero::new(count) {
238                            return Ok(count);
239                        }
240                    }
241                }
242            }
243
244            let mut cpus: libc::c_uint = 0;
245            let mut cpus_size = size_of_val(&cpus);
246
247            unsafe {
248                cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
249            }
250
251            // Fallback approach in case of errors or no hardware threads.
252            if cpus < 1 {
253                let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
254                let res = unsafe {
255                    libc::sysctl(
256                        mib.as_mut_ptr(),
257                        2,
258                        (&raw mut cpus) as *mut _,
259                        (&raw mut cpus_size) as *mut _,
260                        ptr::null_mut(),
261                        0,
262                    )
263                };
264
265                // Handle errors if any.
266                if res == -1 {
267                    return Err(io::Error::last_os_error());
268                } else if cpus == 0 {
269                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
270                }
271            }
272
273            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
274        }
275        target_os = "nto" => {
276            unsafe {
277                use libc::_syspage_ptr;
278                if _syspage_ptr.is_null() {
279                    Err(io::const_error!(io::ErrorKind::NotFound, "no syspage available"))
280                } else {
281                    let cpus = (*_syspage_ptr).num_cpu;
282                    NonZero::new(cpus as usize)
283                        .ok_or(io::Error::UNKNOWN_THREAD_COUNT)
284                }
285            }
286        }
287        any(target_os = "solaris", target_os = "illumos") => {
288            let mut cpus = 0u32;
289            if unsafe { libc::pset_info(libc::PS_MYID, core::ptr::null_mut(), &mut cpus, core::ptr::null_mut()) } != 0 {
290                return Err(io::Error::UNKNOWN_THREAD_COUNT);
291            }
292            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
293        }
294        target_os = "haiku" => {
295            // system_info cpu_count field gets the static data set at boot time with `smp_set_num_cpus`
296            // `get_system_info` calls then `smp_get_num_cpus`
297            unsafe {
298                let mut sinfo: libc::system_info = crate::mem::zeroed();
299                let res = libc::get_system_info(&mut sinfo);
300
301                if res != libc::B_OK {
302                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
303                }
304
305                Ok(NonZero::new_unchecked(sinfo.cpu_count as usize))
306            }
307        }
308        target_os = "vxworks" => {
309            // Note: there is also `vxCpuConfiguredGet`, closer to _SC_NPROCESSORS_CONF
310            // expectations than the actual cores availability.
311
312            // SAFETY: `vxCpuEnabledGet` always fetches a mask with at least one bit set
313            unsafe{
314                let set = libc::vxCpuEnabledGet();
315                Ok(NonZero::new_unchecked(set.count_ones() as usize))
316            }
317        }
318        _ => {
319            // FIXME: implement on Redox, l4re
320            Err(io::const_error!(io::ErrorKind::Unsupported, "getting the number of hardware threads is not supported on the target platform"))
321        }
322    }
323}
324
325pub fn current_os_id() -> Option<u64> {
326    // Most Unix platforms have a way to query an integer ID of the current thread, all with
327    // slightly different spellings.
328    //
329    // The OS thread ID is used rather than `pthread_self` so as to match what will be displayed
330    // for process inspection (debuggers, trace, `top`, etc.).
331    cfg_select! {
332        // Most platforms have a function returning a `pid_t` or int, which is an `i32`.
333        any(target_os = "android", target_os = "linux") => {
334            use crate::sys::pal::weak::syscall;
335
336            // `libc::gettid` is only available on glibc 2.30+, but the syscall is available
337            // since Linux 2.4.11.
338            unsafe fn gettid() -> libc::pid_t {
    let ref gettid: ExternWeak<unsafe extern "C" fn() -> libc::pid_t> =
        {
            unsafe extern "C" {
                #[linkage = "extern_weak"]
                static gettid: Option<unsafe extern "C" fn() -> libc::pid_t>;
            }

            #[allow(unused_unsafe)]
            ExternWeak::new(unsafe { gettid })
        };
    if let Some(fun) = gettid.get() {
        unsafe { fun() }
    } else { unsafe { libc::syscall(libc::SYS_gettid) as libc::pid_t } }
}syscall!(fn gettid() -> libc::pid_t;);
339
340            // SAFETY: FFI call with no preconditions.
341            let id: libc::pid_t = unsafe { gettid() };
342            Some(id as u64)
343        }
344        target_os = "nto" => {
345            // SAFETY: FFI call with no preconditions.
346            let id: libc::pid_t = unsafe { libc::gettid() };
347            Some(id as u64)
348        }
349        target_os = "openbsd" => {
350            // SAFETY: FFI call with no preconditions.
351            let id: libc::pid_t = unsafe { libc::getthrid() };
352            Some(id as u64)
353        }
354        target_os = "freebsd" => {
355            // SAFETY: FFI call with no preconditions.
356            let id: libc::c_int = unsafe { libc::pthread_getthreadid_np() };
357            Some(id as u64)
358        }
359        target_os = "netbsd" => {
360            // SAFETY: FFI call with no preconditions.
361            let id: libc::lwpid_t = unsafe { libc::_lwp_self() };
362            Some(id as u64)
363        }
364        any(target_os = "illumos", target_os = "solaris") => {
365            // On Illumos and Solaris, the `pthread_t` is the same as the OS thread ID.
366            // SAFETY: FFI call with no preconditions.
367            let id: libc::pthread_t = unsafe { libc::pthread_self() };
368            Some(id as u64)
369        }
370        target_vendor = "apple" => {
371            // Apple allows querying arbitrary thread IDs, `thread=NULL` queries the current thread.
372            let mut id = 0u64;
373            // SAFETY: `thread_id` is a valid pointer, no other preconditions.
374            let status: libc::c_int = unsafe { libc::pthread_threadid_np(0, &mut id) };
375            if status == 0 {
376                Some(id)
377            } else {
378                None
379            }
380        }
381        // Other platforms don't have an OS thread ID or don't have a way to access it.
382        _ => None,
383    }
384}
385
386#[cfg(any(
387    target_os = "linux",
388    target_os = "nto",
389    target_os = "solaris",
390    target_os = "illumos",
391    target_os = "vxworks",
392    target_os = "cygwin",
393    target_vendor = "apple",
394))]
395fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] {
396    let mut result = [0; MAX_WITH_NUL];
397    for (src, dst) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) {
398        *dst = *src as libc::c_char;
399    }
400    result
401}
402
#[cfg(target_os = "android")]
pub fn set_name(name: &CStr) {
    // Android does not reliably expose pthread_setname_np; use prctl instead.
    const PR_SET_NAME: libc::c_int = 15;
    unsafe {
        let res = libc::prctl(
            PR_SET_NAME,
            name.as_ptr(),
            0 as libc::c_ulong,
            0 as libc::c_ulong,
            0 as libc::c_ulong,
        );
        // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
        debug_assert_eq!(res, 0);
    }
}
418
419#[cfg(any(
420    target_os = "linux",
421    target_os = "freebsd",
422    target_os = "dragonfly",
423    target_os = "nuttx",
424    target_os = "cygwin"
425))]
426pub fn set_name(name: &CStr) {
427    unsafe {
428        cfg_select! {
429            any(target_os = "linux", target_os = "cygwin") => {
430                // Linux and Cygwin limits the allowed length of the name.
431                const TASK_COMM_LEN: usize = 16;
432                let name = truncate_cstr::<{ TASK_COMM_LEN }>(name);
433            }
434            _ => {
435                // FreeBSD, DragonFly BSD and NuttX do not enforce length limits.
436            }
437        };
438        // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20 for Linux,
439        // FreeBSD 12.2 and 13.0, and DragonFly BSD 6.0.
440        let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
441        // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
442        if true {
    match (&res, &0) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(res, 0);
443    }
444}
445
#[cfg(target_os = "openbsd")]
pub fn set_name(name: &CStr) {
    // OpenBSD's variant returns no error to check.
    unsafe {
        libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
    }
}
452
#[cfg(target_vendor = "apple")]
pub fn set_name(name: &CStr) {
    unsafe {
        // Apple's pthread_setname_np only works on the current thread and has a length cap.
        let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name);
        let res = libc::pthread_setname_np(name.as_ptr());
        // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
        debug_assert_eq!(res, 0);
    }
}
462
#[cfg(target_os = "netbsd")]
pub fn set_name(name: &CStr) {
    unsafe {
        // NetBSD takes a printf-style format plus argument; pass the name via "%s".
        let res = libc::pthread_setname_np(
            libc::pthread_self(),
            c"%s".as_ptr(),
            name.as_ptr() as *mut libc::c_void,
        );
        debug_assert_eq!(res, 0);
    }
}
474
#[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))]
pub fn set_name(name: &CStr) {
    // pthread_setname_np may not exist on older systems; resolve it weakly and
    // silently skip naming if it's absent.
    weak!(
        fn pthread_setname_np(thread: libc::pthread_t, name: *const libc::c_char) -> libc::c_int;
    );

    if let Some(f) = pthread_setname_np.get() {
        #[cfg(target_os = "nto")]
        const THREAD_NAME_MAX: usize = libc::_NTO_THREAD_NAME_MAX as usize;
        #[cfg(any(target_os = "solaris", target_os = "illumos"))]
        const THREAD_NAME_MAX: usize = 32;

        let name = truncate_cstr::<{ THREAD_NAME_MAX }>(name);
        let res = unsafe { f(libc::pthread_self(), name.as_ptr()) };
        debug_assert_eq!(res, 0);
    }
}
492
#[cfg(target_os = "fuchsia")]
pub fn set_name(name: &CStr) {
    use crate::sys::pal::fuchsia::*;
    // Fuchsia names threads through the kernel object property API.
    unsafe {
        zx_object_set_property(
            zx_thread_self(),
            ZX_PROP_NAME,
            name.as_ptr() as *const libc::c_void,
            name.to_bytes().len(),
        );
    }
}
505
#[cfg(target_os = "haiku")]
pub fn set_name(name: &CStr) {
    unsafe {
        // Haiku identifies the current thread via find_thread(NULL).
        let thread_self = libc::find_thread(ptr::null_mut());
        let res = libc::rename_thread(thread_self, name.as_ptr());
        // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
        debug_assert_eq!(res, libc::B_OK);
    }
}
515
#[cfg(target_os = "vxworks")]
pub fn set_name(name: &CStr) {
    // VxWorks renames the task; the buffer must fit VX_TASK_RENAME_LENGTH (incl. NUL).
    let mut name = truncate_cstr::<{ (libc::VX_TASK_RENAME_LENGTH - 1) as usize }>(name);
    let res = unsafe { libc::taskNameSet(libc::taskIdSelf(), name.as_mut_ptr()) };
    debug_assert_eq!(res, libc::OK);
}
522
523#[cfg(not(target_os = "espidf"))]
524pub fn sleep(dur: Duration) {
525    cfg_select! {
526        // Any unix that has clock_nanosleep
527        // If this list changes update the MIRI chock_nanosleep shim
528        any(
529            target_os = "freebsd",
530            target_os = "netbsd",
531            target_os = "linux",
532            target_os = "android",
533            target_os = "solaris",
534            target_os = "illumos",
535            target_os = "dragonfly",
536            target_os = "hurd",
537            target_os = "vxworks",
538            target_os = "wasi",
539        ) => {
540            // POSIX specifies that `nanosleep` uses CLOCK_REALTIME, but is not
541            // affected by clock adjustments. The timing of `sleep` however should
542            // be tied to `Instant` where possible. Thus, we use `clock_nanosleep`
543            // with a relative time interval instead, which allows explicitly
544            // specifying the clock.
545            //
546            // In practice, most systems (like e.g. Linux) actually use
547            // CLOCK_MONOTONIC for `nanosleep` anyway, but others like FreeBSD don't
548            // so it's better to be safe.
549            //
550            // wasi-libc prior to WebAssembly/wasi-libc#696 has a broken implementation
551            // of `nanosleep` which used `CLOCK_REALTIME` even though it is unsupported
552            // on WASIp2. Using `clock_nanosleep` directly bypasses the issue.
553            unsafe fn nanosleep(rqtp: *const libc::timespec, rmtp: *mut libc::timespec) -> libc::c_int {
554                unsafe { libc::clock_nanosleep(crate::sys::time::Instant::CLOCK_ID, 0, rqtp, rmtp) }
555            }
556        }
557        _ => {
558            unsafe fn nanosleep(rqtp: *const libc::timespec, rmtp: *mut libc::timespec) -> libc::c_int {
559                let r = unsafe { libc::nanosleep(rqtp, rmtp) };
560                // `clock_nanosleep` returns the error number directly, so mimic
561                // that behaviour to make the shared code below simpler.
562                if r == 0 { 0 } else { sys::io::errno() }
563            }
564        }
565    }
566
567    let mut secs = dur.as_secs();
568    let mut nsecs = dur.subsec_nanos() as _;
569
570    // If we're awoken with a signal then the return value will be -1 and
571    // nanosleep will fill in `ts` with the remaining time.
572    unsafe {
573        while secs > 0 || nsecs > 0 {
574            let mut ts = libc::timespec::default();
575            ts.tv_sec = cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t;
576            ts.tv_nsec = nsecs;
577
578            secs -= ts.tv_sec as u64;
579            let ts_ptr = &raw mut ts;
580            let r = nanosleep(ts_ptr, ts_ptr);
581            if r != 0 {
582                match (&r, &libc::EINTR) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::None);
        }
    }
};assert_eq!(r, libc::EINTR);
583                secs += ts.tv_sec as u64;
584                nsecs = ts.tv_nsec;
585            } else {
586                nsecs = 0;
587            }
588        }
589    }
590}
591
#[cfg(target_os = "espidf")]
pub fn sleep(dur: Duration) {
    // ESP-IDF does not have `nanosleep`, so we use `usleep` instead.
    // As per the documentation of `usleep`, it is expected to support
    // sleep times as big as at least up to 1 second.
    //
    // ESP-IDF does support almost up to `u32::MAX`, but due to a potential integer overflow in its
    // `usleep` implementation
    // (https://github.com/espressif/esp-idf/blob/d7ca8b94c852052e3bc33292287ef4dd62c9eeb1/components/newlib/time.c#L210),
    // we limit the sleep time to the maximum one that would not cause the underlying `usleep` implementation to overflow
    // (`portTICK_PERIOD_MS` can be anything between 1 to 1000, and is 10 by default).
    const MAX_MICROS: u32 = u32::MAX - 1_000_000 - 1;

    // Add any nanoseconds smaller than a microsecond as an extra microsecond
    // so as to comply with the `std::thread::sleep` contract which mandates
    // implementations to sleep for _at least_ the provided `dur`.
    // We can't overflow `micros` as it is a `u128`, while `Duration` is a pair of
    // (`u64` secs, `u32` nanos), where the nanos are strictly smaller than 1 second
    // (i.e. < 1_000_000_000)
    let mut micros = dur.as_micros() + if dur.subsec_nanos() % 1_000 > 0 { 1 } else { 0 };

    while micros > 0 {
        let st = if micros > MAX_MICROS as u128 { MAX_MICROS } else { micros as u32 };
        unsafe {
            libc::usleep(st);
        }

        micros -= st as u128;
    }
}
622
623// Any unix that has clock_nanosleep
624// If this list changes update the MIRI chock_nanosleep shim
625#[cfg(any(
626    target_os = "freebsd",
627    target_os = "netbsd",
628    target_os = "linux",
629    target_os = "android",
630    target_os = "solaris",
631    target_os = "illumos",
632    target_os = "dragonfly",
633    target_os = "hurd",
634    target_os = "vxworks",
635    target_os = "wasi",
636))]
637pub fn sleep_until(deadline: crate::time::Instant) {
638    use crate::time::Instant;
639
640    #[cfg(all(
641        target_os = "linux",
642        target_env = "gnu",
643        target_pointer_width = "32",
644        not(target_arch = "riscv32")
645    ))]
646    {
647        use crate::sys::pal::time::__timespec64;
648        use crate::sys::pal::weak::weak;
649
650        // This got added in glibc 2.31, along with a 64-bit `clock_gettime`
651        // function.
652        weak! {
653            fn __clock_nanosleep_time64(
654                clock_id: libc::clockid_t,
655                flags: libc::c_int,
656                req: *const __timespec64,
657                rem: *mut __timespec64,
658            ) -> libc::c_int;
659        }
660
661        if let Some(clock_nanosleep) = __clock_nanosleep_time64.get() {
662            let ts = deadline.into_inner().into_timespec().to_timespec64();
663            loop {
664                let r = unsafe {
665                    clock_nanosleep(
666                        crate::sys::time::Instant::CLOCK_ID,
667                        libc::TIMER_ABSTIME,
668                        &ts,
669                        core::ptr::null_mut(),
670                    )
671                };
672
673                match r {
674                    0 => return,
675                    libc::EINTR => continue,
676                    // If the underlying kernel doesn't support the 64-bit
677                    // syscall, `__clock_nanosleep_time64` will fail. The
678                    // error code nowadays is EOVERFLOW, but it used to be
679                    // ENOSYS – so just don't rely on any particular value.
680                    // The parameters are all valid, so the only reasons
681                    // why the call might fail are EINTR and the call not
682                    // being supported. Fall through to the clamping version
683                    // in that case.
684                    _ => break,
685                }
686            }
687        }
688    }
689
690    let Some(ts) = deadline.into_inner().into_timespec().to_timespec() else {
691        // The deadline is further in the future then can be passed to
692        // clock_nanosleep. We have to use Self::sleep instead. This might
693        // happen on 32 bit platforms, especially closer to 2038.
694        let now = Instant::now();
695        if let Some(delay) = deadline.checked_duration_since(now) {
696            sleep(delay);
697        }
698        return;
699    };
700
701    unsafe {
702        // When we get interrupted (res = EINTR) call clock_nanosleep again
703        loop {
704            let res = libc::clock_nanosleep(
705                crate::sys::time::Instant::CLOCK_ID,
706                libc::TIMER_ABSTIME,
707                &ts,
708                core::ptr::null_mut(), // not required with TIMER_ABSTIME
709            );
710
711            if res == 0 {
712                break;
713            } else {
714                match (&res, &libc::EINTR) {
    (left_val, right_val) => {
        if !(*left_val == *right_val) {
            let kind = ::core::panicking::AssertKind::Eq;
            ::core::panicking::assert_failed(kind, &*left_val, &*right_val,
                ::core::option::Option::Some(format_args!("timespec is in range,\n                         clockid is valid and kernel should support it")));
        }
    }
};assert_eq!(
715                    res,
716                    libc::EINTR,
717                    "timespec is in range,
718                         clockid is valid and kernel should support it"
719                );
720            }
721        }
722    }
723}
724
#[cfg(target_vendor = "apple")]
pub fn sleep_until(deadline: crate::time::Instant) {
    unsafe extern "C" {
        // This is defined in the public header mach/mach_time.h alongside
        // `mach_absolute_time`, and like it has been available since the very
        // beginning.
        //
        // There isn't really any documentation on this function, except for a
        // short reference in technical note 2169:
        // https://developer.apple.com/library/archive/technotes/tn2169/_index.html
        safe fn mach_wait_until(deadline: u64) -> libc::kern_return_t;
    }

    // Make sure to round up to ensure that we definitely sleep until after
    // the deadline has elapsed.
    let Some(deadline) = deadline.into_inner().into_mach_absolute_time_ceil() else {
        // Since the deadline is before the system boot time, it has already
        // passed, so we can return immediately.
        return;
    };

    // If the deadline is not representable, then sleep for the maximum duration
    // possible and worry about the potential clock issues later (in ca. 600 years).
    let deadline = deadline.try_into().unwrap_or(u64::MAX);
    loop {
        match mach_wait_until(deadline) {
            // Success! The deadline has passed.
            libc::KERN_SUCCESS => break,
            // If the sleep gets interrupted by a signal, `mach_wait_until`
            // returns KERN_ABORTED, so we need to restart the syscall.
            // Also see Apple's implementation of the POSIX `nanosleep`, which
            // converts this error to the POSIX equivalent EINTR:
            // https://github.com/apple-oss-distributions/Libc/blob/55b54c0a0c37b3b24393b42b90a4c561d6c606b1/gen/nanosleep.c#L281-L306
            libc::KERN_ABORTED => continue,
            // All other errors indicate that something has gone wrong...
            error => {
                let description = unsafe { CStr::from_ptr(libc::mach_error_string(error)) };
                panic!("mach_wait_until failed: {} (code {error})", description.display())
            }
        }
    }
}
767
768pub fn yield_now() {
769    let ret = unsafe { libc::sched_yield() };
770    if true {
    match (&ret, &0) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(ret, 0);
771}
772
773#[cfg(any(target_os = "android", target_os = "linux"))]
774mod cgroups {
775    //! Currently not covered
776    //! * cgroup v2 in non-standard mountpoints
777    //! * paths containing control characters or spaces, since those would be escaped in procfs
778    //!   output and we don't unescape
779
780    use crate::borrow::Cow;
781    use crate::ffi::OsString;
782    use crate::fs::{File, exists};
783    use crate::io::{BufRead, Read};
784    use crate::os::unix::ffi::OsStringExt;
785    use crate::path::{Path, PathBuf};
786    use crate::str::from_utf8;
787
788    #[derive(#[automatically_derived]
impl ::core::cmp::PartialEq for Cgroup {
    #[inline]
    fn eq(&self, other: &Cgroup) -> bool {
        let __self_discr = ::core::intrinsics::discriminant_value(self);
        let __arg1_discr = ::core::intrinsics::discriminant_value(other);
        __self_discr == __arg1_discr
    }
}PartialEq)]
789    enum Cgroup {
790        V1,
791        V2,
792    }
793
794    /// Returns cgroup CPU quota in core-equivalents, rounded down or usize::MAX if the quota cannot
795    /// be determined or is not set.
796    pub(super) fn quota() -> usize {
797        let mut quota = usize::MAX;
798        if falsecfg!(miri) {
799            // Attempting to open a file fails under default flags due to isolation.
800            // And Miri does not have parallelism anyway.
801            return quota;
802        }
803
804        let _: Option<()> = try {
805            let mut buf = Vec::with_capacity(128);
806            // find our place in the cgroup hierarchy
807            File::open("/proc/self/cgroup").ok()?.read_to_end(&mut buf).ok()?;
808            let (cgroup_path, version) =
809                buf.split(|&c| c == b'\n').fold(None, |previous, line| {
810                    let mut fields = line.splitn(3, |&c| c == b':');
811                    // 2nd field is a list of controllers for v1 or empty for v2
812                    let version = match fields.nth(1) {
813                        Some(b"") => Cgroup::V2,
814                        Some(controllers)
815                            if from_utf8(controllers)
816                                .is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
817                        {
818                            Cgroup::V1
819                        }
820                        _ => return previous,
821                    };
822
823                    // already-found v1 trumps v2 since it explicitly specifies its controllers
824                    if previous.is_some() && version == Cgroup::V2 {
825                        return previous;
826                    }
827
828                    let path = fields.last()?;
829                    // skip leading slash
830                    Some((path[1..].to_owned(), version))
831                })?;
832            let cgroup_path = PathBuf::from(OsString::from_vec(cgroup_path));
833
834            quota = match version {
835                Cgroup::V1 => quota_v1(cgroup_path),
836                Cgroup::V2 => quota_v2(cgroup_path),
837            };
838        };
839
840        quota
841    }
842
843    fn quota_v2(group_path: PathBuf) -> usize {
844        let mut quota = usize::MAX;
845
846        let mut path = PathBuf::with_capacity(128);
847        let mut read_buf = String::with_capacity(20);
848
849        // standard mount location defined in file-hierarchy(7) manpage
850        let cgroup_mount = "/sys/fs/cgroup";
851
852        path.push(cgroup_mount);
853        path.push(&group_path);
854
855        path.push("cgroup.controllers");
856
857        // skip if we're not looking at cgroup2
858        if #[allow(non_exhaustive_omitted_patterns)] match exists(&path) {
    Err(_) | Ok(false) => true,
    _ => false,
}matches!(exists(&path), Err(_) | Ok(false)) {
859            return usize::MAX;
860        };
861
862        path.pop();
863
864        let _: Option<()> = try {
865            while path.starts_with(cgroup_mount) {
866                path.push("cpu.max");
867
868                read_buf.clear();
869
870                if File::open(&path).and_then(|mut f| f.read_to_string(&mut read_buf)).is_ok() {
871                    let raw_quota = read_buf.lines().next()?;
872                    let mut raw_quota = raw_quota.split(' ');
873                    let limit = raw_quota.next()?;
874                    let period = raw_quota.next()?;
875                    match (limit.parse::<usize>(), period.parse::<usize>()) {
876                        (Ok(limit), Ok(period)) if period > 0 => {
877                            quota = quota.min(limit / period);
878                        }
879                        _ => {}
880                    }
881                }
882
883                path.pop(); // pop filename
884                path.pop(); // pop dir
885            }
886        };
887
888        quota
889    }
890
891    fn quota_v1(group_path: PathBuf) -> usize {
892        let mut quota = usize::MAX;
893        let mut path = PathBuf::with_capacity(128);
894        let mut read_buf = String::with_capacity(20);
895
896        // Hardcode commonly used locations mentioned in the cgroups(7) manpage
897        // if that doesn't work scan mountinfo and adjust `group_path` for bind-mounts
898        let mounts: &[fn(&Path) -> Option<(_, &Path)>] = &[
899            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu"), p)),
900            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu,cpuacct"), p)),
901            // this can be expensive on systems with tons of mountpoints
902            // but we only get to this point when /proc/self/cgroups explicitly indicated
903            // this process belongs to a cpu-controller cgroup v1 and the defaults didn't work
904            find_mountpoint,
905        ];
906
907        for mount in mounts {
908            let Some((mount, group_path)) = mount(&group_path) else { continue };
909
910            path.clear();
911            path.push(mount.as_ref());
912            path.push(&group_path);
913
914            // skip if we guessed the mount incorrectly
915            if #[allow(non_exhaustive_omitted_patterns)] match exists(&path) {
    Err(_) | Ok(false) => true,
    _ => false,
}matches!(exists(&path), Err(_) | Ok(false)) {
916                continue;
917            }
918
919            while path.starts_with(mount.as_ref()) {
920                let mut parse_file = |name| {
921                    path.push(name);
922                    read_buf.clear();
923
924                    let f = File::open(&path);
925                    path.pop(); // restore buffer before any early returns
926                    f.ok()?.read_to_string(&mut read_buf).ok()?;
927                    let parsed = read_buf.trim().parse::<usize>().ok()?;
928
929                    Some(parsed)
930                };
931
932                let limit = parse_file("cpu.cfs_quota_us");
933                let period = parse_file("cpu.cfs_period_us");
934
935                match (limit, period) {
936                    (Some(limit), Some(period)) if period > 0 => quota = quota.min(limit / period),
937                    _ => {}
938                }
939
940                path.pop();
941            }
942
943            // we passed the try_exists above so we should have traversed the correct hierarchy
944            // when reaching this line
945            break;
946        }
947
948        quota
949    }
950
951    /// Scan mountinfo for cgroup v1 mountpoint with a cpu controller
952    ///
953    /// If the cgroupfs is a bind mount then `group_path` is adjusted to skip
954    /// over the already-included prefix
955    fn find_mountpoint(group_path: &Path) -> Option<(Cow<'static, str>, &Path)> {
956        let mut reader = File::open_buffered("/proc/self/mountinfo").ok()?;
957        let mut line = String::with_capacity(256);
958        loop {
959            line.clear();
960            if reader.read_line(&mut line).ok()? == 0 {
961                break;
962            }
963
964            let line = line.trim();
965            let mut items = line.split(' ');
966
967            let sub_path = items.nth(3)?;
968            let mount_point = items.next()?;
969            let mount_opts = items.next_back()?;
970            let filesystem_type = items.nth_back(1)?;
971
972            if filesystem_type != "cgroup" || !mount_opts.split(',').any(|opt| opt == "cpu") {
973                // not a cgroup / not a cpu-controller
974                continue;
975            }
976
977            let sub_path = Path::new(sub_path).strip_prefix("/").ok()?;
978
979            if !group_path.starts_with(sub_path) {
980                // this is a bind-mount and the bound subdirectory
981                // does not contain the cgroup this process belongs to
982                continue;
983            }
984
985            let trimmed_group_path = group_path.strip_prefix(sub_path).ok()?;
986
987            return Some((Cow::Owned(mount_point.to_owned()), trimmed_group_path));
988        }
989
990        None
991    }
992}
993
994// glibc >= 2.15 has a __pthread_get_minstack() function that returns
995// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
996// We need that information to avoid blowing up when a small stack
997// is created in an application with big thread-local storage requirements.
998// See #6233 for rationale and details.
999#[cfg(all(target_os = "linux", target_env = "gnu"))]
1000unsafe fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
1001    // We use dlsym to avoid an ELF version dependency on GLIBC_PRIVATE. (#23628)
1002    // We shouldn't really be using such an internal symbol, but there's currently
1003    // no other way to account for the TLS size.
1004    static DLSYM:
    DlsymWeak<unsafe extern "C" fn(*const libc::pthread_attr_t)
        -> libc::size_t> =
    {
        let Ok(name) =
            CStr::from_bytes_with_nul("__pthread_get_minstack\u{0}".as_bytes()) else {
                {
                    ::core::panicking::panic_fmt(format_args!("symbol name may not contain NUL"));
                }
            };
        unsafe { DlsymWeak::new(name) }
    };
let __pthread_get_minstack = &DLSYM;dlsym!(
1005        fn __pthread_get_minstack(attr: *const libc::pthread_attr_t) -> libc::size_t;
1006    );
1007
1008    match __pthread_get_minstack.get() {
1009        None => libc::PTHREAD_STACK_MIN,
1010        Some(f) => unsafe { f(attr) },
1011    }
1012}
1013
// No point in looking up __pthread_get_minstack() on non-glibc platforms.
#[cfg(all(
    not(all(target_os = "linux", target_env = "gnu")),
    not(any(target_os = "netbsd", target_os = "nuttx"))
))]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    // The attr is unused here: without glibc's TLS-aware helper, the
    // platform-advertised minimum is the best available lower bound.
    libc::PTHREAD_STACK_MIN
}
1022
#[cfg(any(target_os = "netbsd", target_os = "nuttx"))]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    // Query the minimum once and cache it for all subsequent callers.
    static MIN_STACK: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();

    *MIN_STACK.get_or_init(|| {
        // SAFETY: sysconf is safe to call with a valid name constant.
        let raw = unsafe { libc::sysconf(libc::_SC_THREAD_STACK_MIN) };
        // sysconf reports failure/indeterminate limits as a negative value.
        if raw >= 0 { raw as usize } else { 2048 /* just a guess */ }
    })
}