/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that the C language can't guarantee for us.
 * Useful for resource counting etc.
 */

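/*
 * Illustrative sketch (assumption, not part of this header): kernel code
 * normally uses the generic atomic_*() wrappers, which are built on top of
 * these arch_atomic_*() primitives, e.g. for a simple reference count:
 *
 *	struct foo { atomic_t refcount; };
 *
 *	atomic_set(&p->refcount, 1);		take the initial reference
 *	atomic_inc(&p->refcount);		take another reference
 *	if (atomic_dec_and_test(&p->refcount))
 *		kfree(p);			free on the last put
 *
 * "struct foo" and "p" are hypothetical names used only for illustration.
 */

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */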
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * since it is a non-inlined function that increases binary size and
	 * stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

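/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */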
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

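/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */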
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

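/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */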
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

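/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns true if the result is zero,
 * or false for all other cases.
 */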
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

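/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */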
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

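/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */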
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

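/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0,
 * or false for all other cases.
 */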
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

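/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is zero,
 * or false for all other cases.
 */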
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

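/**
 * arch_atomic_add_negative - add and test if the result is negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */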
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

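/**
 * arch_atomic_add_return - add integer and return the new value
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value.  xadd() returns the
 * old value of @v, so @i is added back to produce the result.
 */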
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

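/**
 * arch_atomic_sub_return - subtract integer and return the new value
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the new value.
 */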
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

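/**
 * arch_atomic_fetch_add - add integer and return the old value
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the value @v had before the addition.
 */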
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

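/**
 * arch_atomic_fetch_sub - subtract integer and return the old value
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the value @v had before the
 * subtraction.
 */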
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

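/**
 * arch_atomic_cmpxchg - compare and exchange
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if it was equal to @old, and returns the value
 * @v had before the operation.
 */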
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

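/**
 * arch_atomic_try_cmpxchg - compare and exchange, with success indication
 * @v: pointer of type atomic_t
 * @old: pointer to the expected value
 * @new: new value
 *
 * Atomically sets @v to @new if it was equal to *@old and returns true.
 * On failure, *@old is updated to the value observed in @v and false is
 * returned.
 */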
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

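/**
 * arch_atomic_xchg - exchange the value
 * @v: pointer of type atomic_t
 * @new: new value
 *
 * Atomically sets @v to @new and returns the old value.
 */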
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

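/**
 * arch_atomic_and - bitwise AND with atomic variable
 * @i: integer value to AND with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i.
 */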
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

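/**
 * arch_atomic_fetch_and - bitwise AND and return the old value
 * @i: integer value to AND with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i and returns the old value of @v.
 * x86 has no locked AND that also returns the old value, so this is
 * implemented as a try_cmpxchg() retry loop.
 */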
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

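/**
 * arch_atomic_or - bitwise OR with atomic variable
 * @i: integer value to OR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i.
 */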
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

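/**
 * arch_atomic_fetch_or - bitwise OR and return the old value
 * @i: integer value to OR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i and returns the old value of @v,
 * using a try_cmpxchg() loop like arch_atomic_fetch_and().
 */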
static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

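/**
 * arch_atomic_xor - bitwise XOR with atomic variable
 * @i: integer value to XOR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i.
 */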
static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

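/**
 * arch_atomic_fetch_xor - bitwise XOR and return the old value
 * @i: integer value to XOR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i and returns the old value of @v,
 * using a try_cmpxchg() loop like arch_atomic_fetch_and().
 */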
static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

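/*
 * 64-bit atomics: 32-bit kernels emulate atomic64_t (largely via cmpxchg8b),
 * while 64-bit kernels implement it with native 64-bit instructions.
 */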
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */