1use crate::cmp;
2use crate::ffi::CStr;
3use crate::io;
4use crate::mem;
5use crate::num::NonZero;
6use crate::ptr;
7use crate::sys::{os, stack_overflow};
8use crate::time::Duration;
9
10#[cfg(all(target_os = "linux", target_env = "gnu"))]
11use crate::sys::weak::dlsym;
12#[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))]
13use crate::sys::weak::weak;
// Default stack size used when the caller does not request a specific size.
// Most platforms default to 2 MiB.
#[cfg(not(any(target_os = "l4re", target_os = "vxworks", target_os = "espidf")))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
// L4Re uses a smaller 1 MiB default.
#[cfg(target_os = "l4re")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024;
// VxWorks defaults to 256 KiB.
#[cfg(target_os = "vxworks")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024;
#[cfg(target_os = "espidf")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF menuconfig system should be used
22
#[cfg(target_os = "fuchsia")]
mod zircon {
    //! Minimal hand-rolled bindings for the Zircon syscalls needed by
    //! `Thread::set_name` on Fuchsia.
    type zx_handle_t = u32;
    type zx_status_t = i32;
    // Property selector for an object's name; passed as the `property`
    // argument of `zx_object_set_property`.
    pub const ZX_PROP_NAME: u32 = 3;

    extern "C" {
        // Sets a property (here: the name) on the object behind `handle`.
        pub fn zx_object_set_property(
            handle: zx_handle_t,
            property: u32,
            value: *const libc::c_void,
            value_size: libc::size_t,
        ) -> zx_status_t;
        // Returns a handle to the calling thread.
        pub fn zx_thread_self() -> zx_handle_t;
    }
}
39
/// Handle to a spawned native thread, wrapping the underlying pthread id.
pub struct Thread {
    id: libc::pthread_t,
}

// Some platforms may have pthread_t as a pointer in which case we still want
// a thread to be Send/Sync
unsafe impl Send for Thread {}
unsafe impl Sync for Thread {}
48
impl Thread {
    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
    //
    // Spawns a native thread with (at least) `stack` bytes of stack, running
    // closure `p`. Returns a handle to the new thread, or the pthread_create
    // error on failure.
    pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
        // Double-box the closure so a thin raw pointer can be smuggled through
        // pthread_create's single `void*` user-data argument.
        let p = Box::into_raw(Box::new(p));
        let mut native: libc::pthread_t = mem::zeroed();
        let mut attr: libc::pthread_attr_t = mem::zeroed();
        assert_eq!(libc::pthread_attr_init(&mut attr), 0);

        #[cfg(target_os = "espidf")]
        if stack > 0 {
            // Only set the stack if a non-zero value is passed
            // 0 is used as an indication that the default stack size configured in the ESP-IDF menuconfig system should be used
            assert_eq!(
                libc::pthread_attr_setstacksize(&mut attr, cmp::max(stack, min_stack_size(&attr))),
                0
            );
        }

        #[cfg(not(target_os = "espidf"))]
        {
            // Never request less than the platform's minimum usable stack size.
            let stack_size = cmp::max(stack, min_stack_size(&attr));

            match libc::pthread_attr_setstacksize(&mut attr, stack_size) {
                0 => {}
                n => {
                    assert_eq!(n, libc::EINVAL);
                    // EINVAL means |stack_size| is either too small or not a
                    // multiple of the system page size. Because it's definitely
                    // >= PTHREAD_STACK_MIN, it must be an alignment issue.
                    // Round up to the nearest page and try again.
                    let page_size = os::page_size();
                    // NOTE(review): for a power-of-two page size this mask is
                    // numerically identical to `!(page_size - 1)`, i.e. the
                    // expression rounds `stack_size` up to the next page
                    // boundary — consider the simpler spelling.
                    let stack_size =
                        (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);
                    assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0);
                }
            };
        }

        let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _);
        // Note: if the thread creation fails and this assert fails, then p will
        // be leaked. However, an alternative design could cause double-free
        // which is clearly worse.
        assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);

        return if ret != 0 {
            // The thread failed to start and as a result p was not consumed. Therefore, it is
            // safe to reconstruct the box so that it gets deallocated.
            drop(Box::from_raw(p));
            Err(io::Error::from_raw_os_error(ret))
        } else {
            Ok(Thread { id: native })
        };

        // Entry point executed on the new thread. `main` is the raw boxed
        // closure created above; ownership is taken back here.
        extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
            unsafe {
                // Next, set up our stack overflow handler which may get triggered if we run
                // out of stack.
                let _handler = stack_overflow::Handler::new();
                // Finally, let's run some code.
                Box::from_raw(main as *mut Box<dyn FnOnce()>)();
            }
            ptr::null_mut()
        }
    }

    /// Cooperatively yields the rest of the current time slice to the OS
    /// scheduler.
    pub fn yield_now() {
        let ret = unsafe { libc::sched_yield() };
        debug_assert_eq!(ret, 0);
    }

    /// Sets the current thread's name via `prctl(PR_SET_NAME)`.
    #[cfg(target_os = "android")]
    pub fn set_name(name: &CStr) {
        const PR_SET_NAME: libc::c_int = 15;
        unsafe {
            libc::prctl(
                PR_SET_NAME,
                name.as_ptr(),
                0 as libc::c_ulong,
                0 as libc::c_ulong,
                0 as libc::c_ulong,
            );
        }
    }

    /// Sets the current thread's name, truncating it to the kernel's comm
    /// limit of 16 bytes including the NUL terminator.
    #[cfg(target_os = "linux")]
    pub fn set_name(name: &CStr) {
        const TASK_COMM_LEN: usize = 16;

        unsafe {
            // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20.
            let name = truncate_cstr::<{ TASK_COMM_LEN }>(name);
            let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name via the BSD `pthread_set_name_np`.
    #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))]
    pub fn set_name(name: &CStr) {
        unsafe {
            libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
        }
    }

    /// Sets the current thread's name; on Apple platforms
    /// `pthread_setname_np` only operates on the calling thread and names are
    /// capped at `MAXTHREADNAMESIZE` (truncated here).
    #[cfg(any(
        target_os = "macos",
        target_os = "ios",
        target_os = "watchos",
        target_os = "visionos",
        target_os = "tvos"
    ))]
    pub fn set_name(name: &CStr) {
        unsafe {
            let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name);
            let res = libc::pthread_setname_np(name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name. NetBSD's `pthread_setname_np` takes a
    /// printf-style format plus one argument, hence the "%s" pattern.
    #[cfg(target_os = "netbsd")]
    pub fn set_name(name: &CStr) {
        unsafe {
            let res = libc::pthread_setname_np(
                libc::pthread_self(),
                c"%s".as_ptr(),
                name.as_ptr() as *mut libc::c_void,
            );
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name if `pthread_setname_np` is available at
    /// runtime (looked up weakly, since older systems may lack it).
    #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))]
    pub fn set_name(name: &CStr) {
        weak! {
            fn pthread_setname_np(
                libc::pthread_t, *const libc::c_char
            ) -> libc::c_int
        }

        if let Some(f) = pthread_setname_np.get() {
            // Per-platform maximum name length (including NUL).
            #[cfg(target_os = "nto")]
            const THREAD_NAME_MAX: usize = libc::_NTO_THREAD_NAME_MAX as usize;
            #[cfg(any(target_os = "solaris", target_os = "illumos"))]
            const THREAD_NAME_MAX: usize = 32;

            let name = truncate_cstr::<{ THREAD_NAME_MAX }>(name);
            let res = unsafe { f(libc::pthread_self(), name.as_ptr()) };
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name via Zircon's object-name property.
    #[cfg(target_os = "fuchsia")]
    pub fn set_name(name: &CStr) {
        use self::zircon::*;
        unsafe {
            zx_object_set_property(
                zx_thread_self(),
                ZX_PROP_NAME,
                name.as_ptr() as *const libc::c_void,
                name.to_bytes().len(),
            );
        }
    }

    /// Sets the current thread's name via Haiku's `rename_thread`.
    #[cfg(target_os = "haiku")]
    pub fn set_name(name: &CStr) {
        unsafe {
            // find_thread(NULL) returns the id of the calling thread.
            let thread_self = libc::find_thread(ptr::null_mut());
            let res = libc::rename_thread(thread_self, name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, libc::B_OK);
        }
    }

    #[cfg(any(
        target_env = "newlib",
        target_os = "l4re",
        target_os = "emscripten",
        target_os = "redox",
        target_os = "vxworks",
        target_os = "hurd",
        target_os = "aix",
    ))]
    pub fn set_name(_name: &CStr) {
        // Newlib, Emscripten, and VxWorks have no way to set a thread name.
    }

    /// Sleeps for at least `dur`, resuming the sleep if it is interrupted by
    /// a signal (EINTR) before the full duration has elapsed.
    #[cfg(not(target_os = "espidf"))]
    pub fn sleep(dur: Duration) {
        let mut secs = dur.as_secs();
        let mut nsecs = dur.subsec_nanos() as _;

        // If we're awoken with a signal then the return value will be -1 and
        // nanosleep will fill in `ts` with the remaining time.
        unsafe {
            while secs > 0 || nsecs > 0 {
                // Clamp the seconds to time_t's range; any excess is slept in
                // subsequent loop iterations.
                let mut ts = libc::timespec {
                    tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t,
                    tv_nsec: nsecs,
                };
                secs -= ts.tv_sec as u64;
                let ts_ptr = core::ptr::addr_of_mut!(ts);
                if libc::nanosleep(ts_ptr, ts_ptr) == -1 {
                    assert_eq!(os::errno(), libc::EINTR);
                    // Re-add the unslept remainder reported back in `ts`.
                    secs += ts.tv_sec as u64;
                    nsecs = ts.tv_nsec;
                } else {
                    nsecs = 0;
                }
            }
        }
    }

    /// Sleeps for at least `dur`, issuing `usleep` in chunks of at most
    /// `u32::MAX` microseconds (the largest single request it accepts).
    #[cfg(target_os = "espidf")]
    pub fn sleep(dur: Duration) {
        let mut micros = dur.as_micros();
        unsafe {
            while micros > 0 {
                let st = if micros > u32::MAX as u128 { u32::MAX } else { micros as u32 };
                libc::usleep(st);

                micros -= st as u128;
            }
        }
    }

    /// Blocks until the thread terminates. Panics if joining fails.
    pub fn join(self) {
        unsafe {
            let ret = libc::pthread_join(self.id, ptr::null_mut());
            // Skip Drop (which would detach an already-joined id).
            mem::forget(self);
            assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
        }
    }

    /// Returns the underlying pthread id without giving up ownership.
    pub fn id(&self) -> libc::pthread_t {
        self.id
    }

    /// Consumes the handle, returning the raw pthread id. The caller becomes
    /// responsible for eventually joining or detaching the thread (Drop is
    /// suppressed here).
    pub fn into_id(self) -> libc::pthread_t {
        let id = self.id;
        mem::forget(self);
        id
    }
}
294
295impl Drop for Thread {
296 fn drop(&mut self) {
297 let ret = unsafe { libc::pthread_detach(self.id) };
298 debug_assert_eq!(ret, 0);
299 }
300}
301
302#[cfg(any(
303 target_os = "linux",
304 target_os = "macos",
305 target_os = "ios",
306 target_os = "tvos",
307 target_os = "watchos",
308 target_os = "visionos",
309 target_os = "nto",
310 target_os = "solaris",
311 target_os = "illumos",
312))]
313fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] {
314 let mut result: [i32; MAX_WITH_NUL] = [0; MAX_WITH_NUL];
315 for (src: &u8, dst: &mut i32) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) {
316 *dst = *src as libc::c_char;
317 }
318 result
319}
320
/// Returns the number of hardware threads usable by the current process,
/// honoring CPU affinity masks and (on Linux/Android) cgroup CPU quotas where
/// the platform exposes them. Errors if the count cannot be determined.
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
    cfg_if::cfg_if! {
        if #[cfg(any(
            target_os = "android",
            target_os = "emscripten",
            target_os = "fuchsia",
            target_os = "hurd",
            target_os = "ios",
            target_os = "tvos",
            target_os = "linux",
            target_os = "macos",
            target_os = "aix",
        ))] {
            // Upper bound from cgroup quotas; usize::MAX means "no quota".
            #[allow(unused_assignments)]
            #[allow(unused_mut)]
            let mut quota = usize::MAX;

            #[cfg(any(target_os = "android", target_os = "linux"))]
            {
                quota = cgroups::quota().max(1);
                let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
                unsafe {
                    if libc::sched_getaffinity(0, mem::size_of::<libc::cpu_set_t>(), &mut set) == 0 {
                        let count = libc::CPU_COUNT(&set) as usize;
                        let count = count.min(quota);

                        // According to sched_getaffinity's API it should always be non-zero, but
                        // some old MIPS kernels were buggy and zero-initialized the mask if
                        // none was explicitly set.
                        // In that case we use the sysconf fallback.
                        if let Some(count) = NonZero::new(count) {
                            return Ok(count)
                        }
                    }
                }
            }
            // Fallback: number of online processors, still capped by the quota.
            match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
                -1 => Err(io::Error::last_os_error()),
                0 => Err(io::Error::UNKNOWN_THREAD_COUNT),
                cpus => {
                    let count = cpus as usize;
                    // Cover the unusual situation where we were able to get the quota but not the affinity mask
                    let count = count.min(quota);
                    // SAFETY: `cpus` was matched as positive and quota >= 1.
                    Ok(unsafe { NonZero::new_unchecked(count) })
                }
            }
        } else if #[cfg(any(
            target_os = "freebsd",
            target_os = "dragonfly",
            target_os = "openbsd",
            target_os = "netbsd",
        ))] {
            use crate::ptr;

            #[cfg(target_os = "freebsd")]
            {
                // Prefer the process affinity mask when it is available.
                let mut set: libc::cpuset_t = unsafe { mem::zeroed() };
                unsafe {
                    if libc::cpuset_getaffinity(
                        libc::CPU_LEVEL_WHICH,
                        libc::CPU_WHICH_PID,
                        -1,
                        mem::size_of::<libc::cpuset_t>(),
                        &mut set,
                    ) == 0 {
                        let count = libc::CPU_COUNT(&set) as usize;
                        if count > 0 {
                            return Ok(NonZero::new_unchecked(count));
                        }
                    }
                }
            }

            #[cfg(target_os = "netbsd")]
            {
                // Count the set bits in this thread's dynamically-sized cpuset.
                unsafe {
                    let set = libc::_cpuset_create();
                    if !set.is_null() {
                        let mut count: usize = 0;
                        if libc::pthread_getaffinity_np(libc::pthread_self(), libc::_cpuset_size(set), set) == 0 {
                            for i in 0..libc::cpuid_t::MAX {
                                match libc::_cpuset_isset(i, set) {
                                    -1 => break,   // index past the end of the set
                                    0 => continue, // CPU not in the mask
                                    _ => count = count + 1,
                                }
                            }
                        }
                        libc::_cpuset_destroy(set);
                        if let Some(count) = NonZero::new(count) {
                            return Ok(count);
                        }
                    }
                }
            }

            let mut cpus: libc::c_uint = 0;
            let mut cpus_size = crate::mem::size_of_val(&cpus);

            unsafe {
                cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
            }

            // Fallback approach in case of errors or no hardware threads.
            if cpus < 1 {
                let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
                let res = unsafe {
                    libc::sysctl(
                        mib.as_mut_ptr(),
                        2,
                        core::ptr::addr_of_mut!(cpus) as *mut _,
                        core::ptr::addr_of_mut!(cpus_size) as *mut _,
                        ptr::null_mut(),
                        0,
                    )
                };

                // Handle errors if any.
                if res == -1 {
                    return Err(io::Error::last_os_error());
                } else if cpus == 0 {
                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
                }
            }

            // SAFETY: all zero/error paths returned above, so `cpus >= 1`.
            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "nto")] {
            unsafe {
                use libc::_syspage_ptr;
                if _syspage_ptr.is_null() {
                    Err(io::const_io_error!(io::ErrorKind::NotFound, "No syspage available"))
                } else {
                    let cpus = (*_syspage_ptr).num_cpu;
                    NonZero::new(cpus as usize)
                        .ok_or(io::Error::UNKNOWN_THREAD_COUNT)
                }
            }
        } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] {
            // Query the processor set of the calling LWP.
            let mut cpus = 0u32;
            if unsafe { libc::pset_info(libc::PS_MYID, core::ptr::null_mut(), &mut cpus, core::ptr::null_mut()) } != 0 {
                return Err(io::Error::UNKNOWN_THREAD_COUNT);
            }
            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "haiku")] {
            // system_info cpu_count field gets the static data set at boot time with `smp_set_num_cpus`
            // `get_system_info` calls then `smp_get_num_cpus`
            unsafe {
                let mut sinfo: libc::system_info = crate::mem::zeroed();
                let res = libc::get_system_info(&mut sinfo);

                if res != libc::B_OK {
                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
                }

                Ok(NonZero::new_unchecked(sinfo.cpu_count as usize))
            }
        } else {
            // FIXME: implement on vxWorks, Redox, l4re
            Err(io::const_io_error!(io::ErrorKind::Unsupported, "Getting the number of hardware threads is not supported on the target platform"))
        }
    }
}
483
#[cfg(any(target_os = "android", target_os = "linux"))]
mod cgroups {
    //! Currently not covered
    //! * cgroup v2 in non-standard mountpoints
    //! * paths containing control characters or spaces, since those would be escaped in procfs
    //! output and we don't unescape
    use crate::borrow::Cow;
    use crate::ffi::OsString;
    use crate::fs::{try_exists, File};
    use crate::io::Read;
    use crate::io::{BufRead, BufReader};
    use crate::os::unix::ffi::OsStringExt;
    use crate::path::Path;
    use crate::path::PathBuf;
    use crate::str::from_utf8;

    // Which cgroup hierarchy a /proc/self/cgroup line belongs to.
    #[derive(PartialEq)]
    enum Cgroup {
        V1,
        V2,
    }

    /// Returns cgroup CPU quota in core-equivalents, rounded down or usize::MAX if the quota cannot
    /// be determined or is not set.
    pub(super) fn quota() -> usize {
        let mut quota = usize::MAX;
        if cfg!(miri) {
            // Attempting to open a file fails under default flags due to isolation.
            // And Miri does not have parallelism anyway.
            return quota;
        }

        // Any failure inside the `try` block leaves `quota` at usize::MAX.
        let _: Option<()> = try {
            let mut buf = Vec::with_capacity(128);
            // find our place in the cgroup hierarchy
            File::open("/proc/self/cgroup").ok()?.read_to_end(&mut buf).ok()?;
            let (cgroup_path, version) =
                buf.split(|&c| c == b'\n').fold(None, |previous, line| {
                    // Each line looks like `id:controllers:path`.
                    let mut fields = line.splitn(3, |&c| c == b':');
                    // 2nd field is a list of controllers for v1 or empty for v2
                    let version = match fields.nth(1) {
                        Some(b"") => Cgroup::V2,
                        Some(controllers)
                            if from_utf8(controllers)
                                .is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
                        {
                            Cgroup::V1
                        }
                        _ => return previous,
                    };

                    // already-found v1 trumps v2 since it explicitly specifies its controllers
                    if previous.is_some() && version == Cgroup::V2 {
                        return previous;
                    }

                    let path = fields.last()?;
                    // skip leading slash
                    Some((path[1..].to_owned(), version))
                })?;
            let cgroup_path = PathBuf::from(OsString::from_vec(cgroup_path));

            quota = match version {
                Cgroup::V1 => quota_v1(cgroup_path),
                Cgroup::V2 => quota_v2(cgroup_path),
            };
        };

        quota
    }

    // Reads the cgroup-v2 `cpu.max` files, walking up the hierarchy and
    // taking the tightest limit/period ratio found along the way.
    fn quota_v2(group_path: PathBuf) -> usize {
        let mut quota = usize::MAX;

        let mut path = PathBuf::with_capacity(128);
        let mut read_buf = String::with_capacity(20);

        // standard mount location defined in file-hierarchy(7) manpage
        let cgroup_mount = "/sys/fs/cgroup";

        path.push(cgroup_mount);
        path.push(&group_path);

        path.push("cgroup.controllers");

        // skip if we're not looking at cgroup2
        if matches!(try_exists(&path), Err(_) | Ok(false)) {
            return usize::MAX;
        };

        path.pop();

        let _: Option<()> = try {
            // Walk from the leaf group up to the mount root; ancestors can
            // impose stricter limits than the leaf.
            while path.starts_with(cgroup_mount) {
                path.push("cpu.max");

                read_buf.clear();

                if File::open(&path).and_then(|mut f| f.read_to_string(&mut read_buf)).is_ok() {
                    // First line is `$MAX $PERIOD` (or `max $PERIOD` when unlimited).
                    let raw_quota = read_buf.lines().next()?;
                    let mut raw_quota = raw_quota.split(' ');
                    let limit = raw_quota.next()?;
                    let period = raw_quota.next()?;
                    match (limit.parse::<usize>(), period.parse::<usize>()) {
                        (Ok(limit), Ok(period)) if period > 0 => {
                            quota = quota.min(limit / period);
                        }
                        _ => {}
                    }
                }

                path.pop(); // pop filename
                path.pop(); // pop dir
            }
        };

        quota
    }

    // Reads the cgroup-v1 cpu controller's cfs_quota_us/cfs_period_us pair,
    // trying common mountpoints first and scanning mountinfo as a last resort.
    fn quota_v1(group_path: PathBuf) -> usize {
        let mut quota = usize::MAX;
        let mut path = PathBuf::with_capacity(128);
        let mut read_buf = String::with_capacity(20);

        // Hardcode commonly used locations mentioned in the cgroups(7) manpage
        // if that doesn't work scan mountinfo and adjust `group_path` for bind-mounts
        let mounts: &[fn(&Path) -> Option<(_, &Path)>] = &[
            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu"), p)),
            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu,cpuacct"), p)),
            // this can be expensive on systems with tons of mountpoints
            // but we only get to this point when /proc/self/cgroups explicitly indicated
            // this process belongs to a cpu-controller cgroup v1 and the defaults didn't work
            find_mountpoint,
        ];

        for mount in mounts {
            let Some((mount, group_path)) = mount(&group_path) else { continue };

            path.clear();
            path.push(mount.as_ref());
            path.push(&group_path);

            // skip if we guessed the mount incorrectly
            if matches!(try_exists(&path), Err(_) | Ok(false)) {
                continue;
            }

            // Walk from the leaf group up to the mount root, keeping the
            // tightest quota found.
            while path.starts_with(mount.as_ref()) {
                // Reads one numeric control file relative to `path`, restoring
                // `path` before returning.
                let mut parse_file = |name| {
                    path.push(name);
                    read_buf.clear();

                    let f = File::open(&path);
                    path.pop(); // restore buffer before any early returns
                    f.ok()?.read_to_string(&mut read_buf).ok()?;
                    let parsed = read_buf.trim().parse::<usize>().ok()?;

                    Some(parsed)
                };

                let limit = parse_file("cpu.cfs_quota_us");
                let period = parse_file("cpu.cfs_period_us");

                match (limit, period) {
                    (Some(limit), Some(period)) if period > 0 => quota = quota.min(limit / period),
                    _ => {}
                }

                path.pop();
            }

            // we passed the try_exists above so we should have traversed the correct hierarchy
            // when reaching this line
            break;
        }

        quota
    }

    /// Scan mountinfo for cgroup v1 mountpoint with a cpu controller
    ///
    /// If the cgroupfs is a bind mount then `group_path` is adjusted to skip
    /// over the already-included prefix
    fn find_mountpoint(group_path: &Path) -> Option<(Cow<'static, str>, &Path)> {
        let mut reader = BufReader::new(File::open("/proc/self/mountinfo").ok()?);
        let mut line = String::with_capacity(256);
        loop {
            line.clear();
            if reader.read_line(&mut line).ok()? == 0 {
                break;
            }

            let line = line.trim();
            let mut items = line.split(' ');

            // mountinfo fields (see proc(5)): field 4 is the root of the
            // mount within the filesystem, field 5 the mount point; the
            // fs type and super options sit after the `-` separator near
            // the end, hence the *_back accessors.
            let sub_path = items.nth(3)?;
            let mount_point = items.next()?;
            let mount_opts = items.next_back()?;
            let filesystem_type = items.nth_back(1)?;

            if filesystem_type != "cgroup" || !mount_opts.split(',').any(|opt| opt == "cpu") {
                // not a cgroup / not a cpu-controller
                continue;
            }

            let sub_path = Path::new(sub_path).strip_prefix("/").ok()?;

            if !group_path.starts_with(sub_path) {
                // this is a bind-mount and the bound subdirectory
                // does not contain the cgroup this process belongs to
                continue;
            }

            let trimmed_group_path = group_path.strip_prefix(sub_path).ok()?;

            return Some((Cow::Owned(mount_point.to_owned()), trimmed_group_path));
        }

        None
    }
}
705
// glibc >= 2.15 has a __pthread_get_minstack() function that returns
// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
// We need that information to avoid blowing up when a small stack
// is created in an application with big thread-local storage requirements.
// See #6233 for rationale and details.
#[cfg(all(target_os = "linux", target_env = "gnu"))]
unsafe fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
    // We use dlsym to avoid an ELF version dependency on GLIBC_PRIVATE. (#23628)
    // We shouldn't really be using such an internal symbol, but there's currently
    // no other way to account for the TLS size.
    dlsym!(fn __pthread_get_minstack(*const libc::pthread_attr_t) -> libc::size_t);

    // Fall back to the portable minimum when the private symbol is absent.
    match __pthread_get_minstack.get() {
        None => libc::PTHREAD_STACK_MIN,
        Some(f) => unsafe { f(attr) },
    }
}
723
// No point in looking up __pthread_get_minstack() on non-glibc platforms.
#[cfg(all(not(all(target_os = "linux", target_env = "gnu")), not(target_os = "netbsd")))]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    // Portable lower bound exposed by libc; the attr is not consulted here.
    libc::PTHREAD_STACK_MIN
}
729
#[cfg(target_os = "netbsd")]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    // NOTE(review): hardcoded, presumably because PTHREAD_STACK_MIN is not
    // usable via libc on this target — confirm before relying on the value.
    2048 // just a guess
}
734