/* Common definition for pthread_{timed,try}join{_np}.
   Copyright (C) 2017-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include "pthreadP.h"
#include <atomic.h>
#include <stap-probe.h>
#include <time.h>
#include <futex-internal.h>

static void
cleanup (void *arg)
{
  /* ARG points to the target thread's joinid field (see the
     pthread_cleanup_push call below).  If we already changed the waiter
     ID, reset it.  The call cannot fail for any reason but the thread not
     having done that yet, so there is no reason for a loop.  */
  struct pthread *self = THREAD_SELF;
  atomic_compare_exchange_weak_acquire ((struct pthread **) arg, &self, NULL);
}

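/* Illustrative sketch for readers, not part of this file: roughly how the
   public entry points are expected to dispatch to __pthread_clockjoin_ex.
   The real wrappers live elsewhere in NPTL and also handle timespec
   conversion for pthread_timedjoin_np (CLOCK_REALTIME) and
   pthread_clockjoin_np (caller-supplied clock); the argument values shown
   below are an approximation.

     int
     pthread_join (pthread_t th, void **thread_return)
     {
       // Block without a timeout; the clock argument is ignored.
       return __pthread_clockjoin_ex (th, thread_return, 0, NULL, true);
     }

     int
     pthread_tryjoin_np (pthread_t th, void **thread_return)
     {
       // Never block: report EBUSY while the thread is still running.
       if (((struct pthread *) th)->tid != 0)
         return EBUSY;
       return __pthread_clockjoin_ex (th, thread_return, 0, NULL, false);
     }
*/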
int
__pthread_clockjoin_ex (pthread_t threadid, void **thread_return,
                        clockid_t clockid,
                        const struct __timespec64 *abstime, bool block)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (INVALID_NOT_TERMINATED_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Is the thread joinable?  */
  if (IS_DETACHED (pd))
    /* We cannot wait for the thread.  */
    return EINVAL;

  struct pthread *self = THREAD_SELF;
  int result = 0;

  LIBC_PROBE (pthread_join, 1, threadid);

  if ((pd == self
       || (self->joinid == pd
           && (pd->cancelhandling
               & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
                  | TERMINATED_BITMASK)) == 0))
      && !cancel_enabled_and_canceled (self->cancelhandling))
    /* This is a deadlock situation.  The threads are waiting for each
       other to finish, for example when thread A joins thread B while B
       is already joining A, or when a thread tries to join itself.  Note
       that this is a "may" error.  To be 100% sure we catch this error
       we would have to lock the data structures but it is not necessary.
       In the unlikely case that two threads are really caught in this
       situation they will deadlock.  It is the programmer's problem to
       figure this out.  */
    return EDEADLK;

  /* Wait for the thread to finish.  If it is already locked something
     is wrong.  There can only be one waiter.  */
  else if (__glibc_unlikely (atomic_compare_exchange_weak_acquire (&pd->joinid,
                                                                   &self,
                                                                   NULL)))
    /* There is already somebody waiting for the thread.  */
    return EINVAL;

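  /* The joinid field acts as a small state machine for PD (summarized
     here for the reader, based on how NPTL uses it elsewhere): NULL means
     the thread is joinable and unclaimed, PD itself means the thread is
     detached (this is what IS_DETACHED checks), and any other value is
     the descriptor of the single thread waiting in a join.  The cleanup
     handler above resets the field if the join is cancelled.  */
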
  /* BLOCK waits either indefinitely or based on an absolute time.  POSIX
     also states a cancellation point shall occur for pthread_join, and we
     use the same rationale for pthread_timedjoin_np and
     pthread_clockjoin_np.  The futex wait below therefore uses the
     cancellable variant.  */
  if (block)
    {
      /* If we are cancelled during the wait, the thread we are waiting
         for must be marked as not being waited for again; the cleanup
         handler registered here resets PD->joinid.  */
      pthread_cleanup_push (cleanup, &pd->joinid);

      /* We need acquire MO here so that we synchronize with the kernel's
         store to 0 when the clone terminates (see the
         CLONE_CHILD_CLEARTID comment below).  */
      pid_t tid;
      while ((tid = atomic_load_acquire (&pd->tid)) != 0)
        {
          /* The kernel notifies a process which uses CLONE_CHILD_CLEARTID
             via futex wake-up when the clone terminates.  The memory
             location contains the thread ID while the clone is running and
             is reset to zero by the kernel afterwards.  The kernel up to
             version 3.16.3 does not use the private futex operations for
             futex wake-up when the clone terminates.  */
          int ret = __futex_abstimed_wait_cancelable64 (
            (unsigned int *) &pd->tid, tid, clockid, abstime, LLL_SHARED);
          if (ret == ETIMEDOUT || ret == EOVERFLOW)
            {
              result = ret;
              break;
            }
        }

      pthread_cleanup_pop (0);
    }

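  /* At this point RESULT is zero if the wait (if any) ended because the
     thread terminated and the kernel reset PD->tid to zero; otherwise it
     is ETIMEDOUT or EOVERFLOW from the timed wait.  In the non-blocking
     case the caller is expected to have checked that PD->tid is already
     zero before calling this function.  */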
  void *pd_result = pd->result;
  if (__glibc_likely (result == 0))
    {
      /* We mark the thread as terminated and as joined.  */
      pd->tid = -1;

      /* Store the return value if the caller is interested.  */
      if (thread_return != NULL)
        *thread_return = pd_result;

      /* Free the TCB.  */
      __nptl_free_tcb (pd);
    }
  else
    pd->joinid = NULL;

  LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd_result);

  return result;
}
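
/* Usage example for readers (illustrative only, not compiled as part of
   this file): joining with an absolute timeout measured against
   CLOCK_REALTIME via pthread_timedjoin_np, one of the public interfaces
   implemented on top of __pthread_clockjoin_ex.

     #define _GNU_SOURCE
     #include <pthread.h>
     #include <time.h>
     #include <errno.h>
     #include <stdio.h>

     static void *worker (void *arg) { return arg; }

     int
     main (void)
     {
       pthread_t th;
       void *res;
       struct timespec abstime;

       pthread_create (&th, NULL, worker, NULL);

       // Absolute deadline: now + 5 seconds on CLOCK_REALTIME.
       clock_gettime (CLOCK_REALTIME, &abstime);
       abstime.tv_sec += 5;

       int err = pthread_timedjoin_np (th, &res, &abstime);
       if (err == ETIMEDOUT)
         fprintf (stderr, "thread still running after 5 seconds\n");
       else if (err != 0)
         fprintf (stderr, "join failed: %d\n", err);
       return 0;
     }
*/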