#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
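
/*
 * Illustrative sketch (my_lock is hypothetical, not part of this header):
 * assert_raw_spin_locked() documents and checks the precondition that the
 * caller already holds the lock:
 *
 *	static DEFINE_RAW_SPINLOCK(my_lock);
 *
 *	static void my_helper(void)
 *	{
 *		assert_raw_spin_locked(&my_lock);
 *		...callers must already hold my_lock here...
 *	}
 */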

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);
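
/*
 * Callers do not normally invoke these _raw_*() entry points directly;
 * they are reached through the raw_spin_*() wrappers in spinlock.h.
 * An illustrative sketch (my_lock is hypothetical):
 *
 *	static DEFINE_RAW_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	...critical section, hardirqs off on this CPU...
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 */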

/*
 * An architecture can opt to build these lock operations inline (the
 * CONFIG_INLINE_* options, selected via ARCH_INLINE_*); in that case the
 * out-of-line _raw_*() entry points declared above are remapped to the
 * inline __raw_*() implementations below.  Note that the plain unlock is
 * inlined unless CONFIG_UNINLINE_SPIN_UNLOCK is set:
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif

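/*
 * Preemption is disabled before the trylock attempt, so that the lock
 * can never be held by a preemptible context; it is enabled again when
 * the lock could not be taken.
 */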
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
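
/*
 * Trylock usage sketch (illustrative only; my_lock is hypothetical), via
 * the raw_spin_trylock() wrapper from spinlock.h: the caller must be
 * prepared for failure rather than spin-waiting itself:
 *
 *	if (raw_spin_trylock(&my_lock)) {
 *		...do the work...
 *		raw_spin_unlock(&my_lock);
 *	} else {
 *		...contended: back off or defer...
 *	}
 */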

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	/* hardirqs off first, then preemption, then the lockdep acquire */
	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	return flags;
}

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	/*
	 * SOFTIRQ_LOCK_OFFSET disables softirqs and also counts as a
	 * preemption disable, so no separate preempt_disable() is needed.
	 */
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

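/*
 * The unlock family below annotates the lockdep release first, then does
 * the actual unlock; preemption / hardirqs / softirqs are re-enabled
 * last, at which point a pending reschedule may run.
 */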
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	return 0;
}
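
/*
 * A bh-trylock sketch (illustrative; my_lock is hypothetical), via the
 * raw_spin_trylock_bh() wrapper: on success, softirqs remain disabled
 * until the matching raw_spin_unlock_bh():
 *
 *	if (raw_spin_trylock_bh(&my_lock)) {
 *		...softirqs are off here...
 *		raw_spin_unlock_bh(&my_lock);
 *	}
 */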

/* PREEMPT_RT has its own rwlock implementation */
#ifndef CONFIG_PREEMPT_RT
#include <linux/rwlock_api_smp.h>
#endif

#endif /* __LINUX_SPINLOCK_API_SMP_H */