/* pthread_spin_trylock -- trylock a spin lock.  Generic version.
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <atomic.h>
#include "pthreadP.h"
#include <shlib-compat.h>

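/* Caller-side usage, as an illustrative sketch only (not part of this
   file): a thread typically falls back to other work when trylock fails
   with EBUSY instead of spinning itself.

     pthread_spinlock_t l;
     pthread_spin_init (&l, PTHREAD_PROCESS_PRIVATE);
     if (pthread_spin_trylock (&l) == 0)
       {
         ... critical section; its effects are made visible to the next
         owner by the release MO store in pthread_spin_unlock ...
         pthread_spin_unlock (&l);
       }
     else
       ... EBUSY: the lock was (or appeared to be) held ...  */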
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
  /* For the spin try lock, we have the following possibilities:

     1) If we assume that trylock will most likely succeed in practice:
     * We just do an exchange.

     2) If we want to bias towards cases where trylock succeeds, but do not
     rule out contention:
     * If exchange is not implemented by a CAS loop, and exchange is faster
     than CAS, do an exchange.
     * If exchange is implemented by a CAS loop, use a weak CAS and not an
     exchange, so we bail out after the first failed attempt to change the
     state.

     3) If we expect contention to be likely:
     * If CAS always brings the cache line into an exclusive state even if the
     spinlock is already acquired, then load the value first with
     atomic_load_relaxed and test whether the lock is free; only then do 2)
     (see the sketch after this comment).

     We assume that 2) is the common case and that, in that case, it is not
     slower than 1).

     We use acquire MO to synchronize-with the release MO store in
     pthread_spin_unlock, and thus ensure that prior critical sections
     happen-before this critical section.  */
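
  /* As an illustration only (a hedged sketch under the assumptions of 3)
     above, not code this file compiles): a test-and-test-and-set variant
     would read the lock word first and attempt the CAS only when the lock
     appears free, keeping a busy lock's cache line in a shared state:

       if (atomic_load_relaxed (lock) == 0)
	 {
	   int val = 0;
	   if (atomic_compare_exchange_weak_acquire (lock, &val, 1))
	     return 0;
	 }
       return EBUSY;

     As with the CAS path below, a spurious weak-CAS failure would have to
     be handled before returning EBUSY.  */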
#if ! ATOMIC_EXCHANGE_USES_CAS
  /* Try to acquire the lock with an exchange instruction as this architecture
     has such an instruction and we assume it is faster than a CAS.
     The acquisition succeeds if the lock is not in an acquired state.  */
  if (atomic_exchange_acquire (lock, 1) == 0)
    return 0;
#else
  /* Try to acquire the lock with a CAS instruction as, on this architecture,
     an atomic exchange would itself be implemented by a CAS loop
     (ATOMIC_EXCHANGE_USES_CAS).  The acquisition succeeds if the lock is not
     acquired.  */
  do
    {
      int val = 0;
      if (atomic_compare_exchange_weak_acquire (lock, &val, 1))
	return 0;
    }
  /* atomic_compare_exchange_weak_acquire can fail spuriously.  Whereas
     C++11 and C11 make it clear that trylock operations can fail spuriously,
     POSIX does not explicitly specify this; it only specifies that failing
     synchronization operations do not need to have synchronization effects
     themselves, but a spurious failure is something that could contradict a
     happens-before established earlier (e.g., that we need to observe that
     the lock is acquired).  Therefore, we emulate a strong CAS by simply
     checking with a relaxed MO load that the lock is really acquired before
     returning EBUSY; the additional overhead this may cause is on the slow
     path.  */
  while (atomic_load_relaxed (lock) == 0);
#endif

  return EBUSY;
}
versioned_symbol (libc, __pthread_spin_trylock, pthread_spin_trylock,
		  GLIBC_2_34);

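/* Before glibc 2.34, pthread_spin_trylock was provided by libpthread.
   The versioned_symbol above makes the GLIBC_2.34 version in libc the
   default; the compat symbol below keeps the GLIBC_2.2 version available
   for binaries linked against older releases.  */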
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34)
compat_symbol (libpthread, __pthread_spin_trylock, pthread_spin_trylock,
	       GLIBC_2_2);
#endif