/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <asm/current.h>

#include <linux/thread_info.h>
#include <linux/static_call_types.h>

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
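/*
 * Concrete values, for illustration; PREEMPT_DISABLED is defined in
 * linux/preempt.h and with CONFIG_PREEMPT_COUNT works out to
 * PREEMPT_ENABLED + 1:
 *
 *	PREEMPT_ENABLED  == 0x80000000	count 0, bit set: no resched needed
 *	PREEMPT_DISABLED == 0x80000001	count 1, bit set
 *
 * The bit reads inverted: it is _set_ while no reschedule is needed,
 * so that plain zero is the unique "may and must reschedule" state.
 */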

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current
 * users who assume that a non-zero value means we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(pcpu_hot.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	old = raw_cpu_read_4(pcpu_hot.preempt_count);
	do {
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (!raw_cpu_try_cmpxchg_4(pcpu_hot.preempt_count, &old, new));
}
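/*
 * Worked example of the loop above, with illustrative values: if the
 * count is 0x00000003 (bit clear, so a resched is pending, count 3),
 * preempt_count_set(2) computes
 *
 *	new = (0x00000003 & 0x80000000) | (2 & 0x7fffffff) = 0x00000002
 *
 * i.e. the count is replaced while the inverted resched bit keeps its
 * current state; the cmpxchg loop retries if an IRQ changed the word
 * underneath us.
 */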

/*
 * Must be macros to avoid header recursion hell. init_task_preempt_count()
 * is a no-op here because the count is per-CPU state rather than per-task
 * state.
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
	per_cpu(pcpu_hot.preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */
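/*
 * A worked walk-through of that fold, with illustrative values:
 *
 *	0x80000001	after preempt_disable(): count 1, no resched needed
 *	0x00000001	set_preempt_need_resched() cleared the bit
 *	0x00000000	preempt_enable()'s decrement hit 0: we both may
 *			and must reschedule
 *
 * Decrementing 0x80000001 with no resched pending instead yields
 * 0x80000000, which is non-zero, so we fall through without calling
 * into the scheduler.
 */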

static __always_inline void set_preempt_need_resched(void)
{
	/* The bit is inverted: _clearing_ it marks a resched as needed. */
	raw_cpu_and_4(pcpu_hot.preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	/* Setting the bit means no resched is needed. */
	raw_cpu_or_4(pcpu_hot.preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	/* Bit clear == resched needed, hence the negation. */
	return !(raw_cpu_read_4(pcpu_hot.preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(pcpu_hot.preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	/* Subtraction as addition of -val keeps this a single percpu add. */
	raw_cpu_add_4(pcpu_hot.preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to
 * reschedule, a decrement which hits zero means we have no preempt_count
 * and should reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return GEN_UNARY_RMWcc("decl", pcpu_hot.preempt_count, e,
			       __percpu_arg([var]));
}
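/*
 * Conceptually the above generates (a sketch, not the exact expansion):
 *
 *	decl	%gs:pcpu_hot+<preempt_count offset>
 *	sete	%al
 *
 * a single read-modify-write on the per-CPU word, with the "hit zero"
 * answer taken from the Zero Flag rather than a separate re-read, so
 * the test cannot be torn from the decrement by an interrupt.
 */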

/*
 * Returns true when we need to resched and can do so (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(raw_cpu_read_4(pcpu_hot.preempt_count) == preempt_offset);
}
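/*
 * Usage sketch with a hypothetical caller: a fully-preemptible-context
 * check would pass an offset of 0,
 *
 *	if (should_resched(0))
 *		__preempt_schedule();
 *
 * and the == test then only fires when no count is held _and_ the
 * inverted resched bit has been cleared, both encoded as zero bits in
 * the same word.
 */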

#ifdef CONFIG_PREEMPTION

extern asmlinkage void preempt_schedule(void);
extern asmlinkage void preempt_schedule_thunk(void);

#define preempt_schedule_dynamic_enabled	preempt_schedule_thunk
#define preempt_schedule_dynamic_disabled	NULL

extern asmlinkage void preempt_schedule_notrace(void);
extern asmlinkage void preempt_schedule_notrace_thunk(void);

#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace_thunk
#define preempt_schedule_notrace_dynamic_disabled	NULL
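/*
 * The *_dynamic_{enabled,disabled} aliases are the targets the generic
 * CONFIG_PREEMPT_DYNAMIC code switches the static calls between; a NULL
 * "disabled" target means the call site gets patched out entirely.
 */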

#ifdef CONFIG_PREEMPT_DYNAMIC

DECLARE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);

#define __preempt_schedule() \
do { \
	__STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule); \
	asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
} while (0)

DECLARE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);

#define __preempt_schedule_notrace() \
do { \
	__STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule_notrace); \
	asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
} while (0)
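/*
 * STATIC_CALL_TRAMP_STR() names the static-call trampoline (the
 * "__SCT__"-prefixed symbol), so each macro above compiles to one
 * direct call that static_call_update() can retarget at runtime;
 * __STATIC_CALL_MOD_ADDRESSABLE() merely keeps the key addressable
 * from modules. Sketch of the emitted code:
 *
 *	call	__SCT__preempt_schedule
 */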

#else /* PREEMPT_DYNAMIC */

#define __preempt_schedule() \
	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT);

#define __preempt_schedule_notrace() \
	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT);
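/*
 * In both variants we call the assembly thunks rather than the C
 * functions directly: the thunks save and restore all caller-clobbered
 * registers around the real call, which is what lets these macros emit
 * a bare "call" without declaring any register clobbers to the
 * compiler.
 */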

#endif /* PREEMPT_DYNAMIC */

#endif /* PREEMPTION */

#endif /* __ASM_PREEMPT_H */