/* elision-lock.c: Elided pthread mutex lock.
   Copyright (C) 2011-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <pthread.h>
#include "pthreadP.h"
#include "lowlevellock.h"
#include "hle.h"
#include <elision-conf.h>

#ifndef EXTRAARG
#define EXTRAARG
#endif
#ifndef LLL_LOCK
#define LLL_LOCK(a,b) lll_lock(a,b), 0
#endif
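
/* EXTRAARG and LLL_LOCK are override hooks: a variant build (such as the
   timed-lock version, which includes this file) can define them first to
   add an extra parameter and substitute a different fallback lock.  The
   default LLL_LOCK wraps lll_lock, which does not yield a value, in a
   comma expression so that the fallback path below can return 0.  */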

#define aconf __elision_aconf

/* Adaptive lock using transactions.
   By default the lock region is run as a transaction, and when it
   aborts or the lock is busy the lock adapts itself.  */
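/* adapt_count is per-lock adaptation state: a failed elision attempt sets
   it to a positive skip count, each acquisition while it stays positive
   decrements it and takes the normal lock, and elision is tried again once
   it drops back to zero or below.  */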

int
__lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
{
  /* adapt_count can be accessed concurrently; these accesses can be both
     inside of transactions (if critical sections are nested and the outer
     critical section uses lock elision) and outside of transactions.  Thus,
     we need to use atomic accesses to avoid data races.  However, the
     value of adapt_count is just a hint, so relaxed MO accesses are
     sufficient.  */
  if (atomic_load_relaxed (adapt_count) <= 0)
    {
      unsigned status;
      int try_xbegin;

      for (try_xbegin = aconf.retry_try_xbegin;
           try_xbegin > 0;
           try_xbegin--)
        {
          if ((status = _xbegin()) == _XBEGIN_STARTED)
            {
              if (*futex == 0)
                return 0;

              /* Lock was busy.  Fall back to normal locking.
                 We could also _xend here, but an explicit xabort with
                 code 0xff is more visible in the profiler.  */
              _xabort (_ABORT_LOCK_BUSY);
            }

          if (!(status & _XABORT_RETRY))
            {
              if ((status & _XABORT_EXPLICIT)
                  && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
                {
                  /* For now we simply skip elision here.  Better would be
                     to wait a bit and retry; that likely needs some
                     spinning.  See above for why relaxed MO is
                     sufficient.  */
                  if (atomic_load_relaxed (adapt_count)
                      != aconf.skip_lock_busy)
                    atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
                }
              /* Internal abort.  There is no chance for a retry.  Fall
                 back to the normal lock now, and make the next attempts
                 skip elision as well.  Be careful to avoid writing to the
                 lock word.  See above for why relaxed MO is sufficient.  */
              else if (atomic_load_relaxed (adapt_count)
                       != aconf.skip_lock_internal_abort)
                atomic_store_relaxed (adapt_count,
                                      aconf.skip_lock_internal_abort);
              break;
            }
        }
    }
  else
    {
      /* Use a normal lock until the threshold counter runs out.
         Lost updates are possible, but acceptable since adapt_count is
         only a hint.  */
      atomic_store_relaxed (adapt_count,
                            atomic_load_relaxed (adapt_count) - 1);
    }

  /* Use a normal lock here.  */
  return LLL_LOCK ((*futex), private);
}
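/* libc_hidden_def provides a hidden internal alias so that calls from
   within libc bind directly to this function instead of going through
   the PLT.  */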
libc_hidden_def (__lll_lock_elision)

source code of glibc/sysdeps/unix/sysv/linux/x86/elision-lock.c
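
Below is a minimal, standalone sketch of the same adaptive elision pattern,
written against the public RTM intrinsics from <immintrin.h> rather than
glibc's internal hle.h and lowlevellock.h.  The demo_elided_lock type, the
demo_* functions and the DEMO_* tuning constants are hypothetical stand-ins
for the futex word, adapt_count and __elision_aconf used above; the sketch
only illustrates the control flow and is not part of glibc.  Compile with
-mrtm and run on hardware with Intel TSX/RTM enabled.

#include <immintrin.h>
#include <stdatomic.h>

/* Hypothetical tuning knobs, standing in for __elision_aconf.  */
#define DEMO_RETRY_TRY_XBEGIN 3    /* like aconf.retry_try_xbegin */
#define DEMO_SKIP_AFTER_ABORT 3    /* like aconf.skip_lock_busy */
#define DEMO_ABORT_LOCK_BUSY 0xff  /* like _ABORT_LOCK_BUSY in hle.h */

struct demo_elided_lock
{
  atomic_int lock;            /* 0 = free, 1 = taken (stands in for *futex) */
  atomic_short adapt_count;   /* > 0: skip elision for a while */
};

static void
demo_lock (struct demo_elided_lock *l)
{
  if (atomic_load_explicit (&l->adapt_count, memory_order_relaxed) <= 0)
    {
      for (int i = DEMO_RETRY_TRY_XBEGIN; i > 0; i--)
        {
          unsigned status = _xbegin ();
          if (status == _XBEGIN_STARTED)
            {
              /* Elided: the lock word stays in the read set, so several
                 elided critical sections can run concurrently.  */
              if (atomic_load_explicit (&l->lock, memory_order_relaxed) == 0)
                return;
              /* Lock is held by a non-elided owner; abort explicitly.  */
              _xabort (DEMO_ABORT_LOCK_BUSY);
            }
          if (!(status & _XABORT_RETRY))
            {
              /* Persistent abort (including our explicit lock-busy abort):
                 disable elision for the next few acquisitions.  */
              atomic_store_explicit (&l->adapt_count, DEMO_SKIP_AFTER_ABORT,
                                     memory_order_relaxed);
              break;
            }
        }
    }
  else
    /* Count down toward retrying elision; lost updates are harmless.  */
    atomic_store_explicit (&l->adapt_count,
                           atomic_load_explicit (&l->adapt_count,
                                                 memory_order_relaxed) - 1,
                           memory_order_relaxed);

  /* Fallback: a plain spinlock instead of glibc's futex-based lll_lock.  */
  while (atomic_exchange_explicit (&l->lock, 1, memory_order_acquire) != 0)
    ;
}

static void
demo_unlock (struct demo_elided_lock *l)
{
  /* If the lock word is still free we must be inside the elided
     transaction, so commit it; otherwise release the real lock.  */
  if (atomic_load_explicit (&l->lock, memory_order_relaxed) == 0)
    _xend ();
  else
    atomic_store_explicit (&l->lock, 0, memory_order_release);
}

The unlock side can distinguish the elided case by checking whether the lock
word is still free: a non-elided owner would have written 1 to it, and any
concurrent writer would already have aborted this thread's transaction, so a
free lock word at unlock time implies the critical section ran elided.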