/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};
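
/*
 * Example: a minimal sketch of an intel_wakeref_ops implementation.
 * The embedding struct foo and the foo_hw_resume()/foo_hw_park()
 * helpers are hypothetical. Both callbacks run underneath wf->mutex:
 * get() as the count first rises from zero (returning 0 on success,
 * or a negative error code to unwind the acquisition), and put() once
 * the count returns to zero (returning nonzero retains the runtime-pm
 * wakeref):
 *
 *	static int foo_get(struct intel_wakeref *wf)
 *	{
 *		struct foo *foo = container_of(wf, struct foo, wakeref);
 *
 *		return foo_hw_resume(foo);
 *	}
 *
 *	static int foo_put(struct intel_wakeref *wf)
 *	{
 *		struct foo *foo = container_of(wf, struct foo, wakeref);
 *
 *		foo_hw_park(foo);
 *		return 0;
 *	}
 *
 *	static const struct intel_wakeref_ops foo_wakeref_ops = {
 *		.get = foo_get,
 *		.put = foo_put,
 *	};
 */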

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct drm_i915_private *i915;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct drm_i915_private *i915,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, i915, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (i915), (ops), &__key);		\
} while (0)
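
/*
 * Example: embedding and initialising a wakeref (an illustrative
 * sketch; struct foo and foo_wakeref_ops continue the example above).
 * The intel_wakeref_init() wrapper supplies a static lock class per
 * expansion site so that lockdep can tell the resulting mutexes apart:
 *
 *	struct foo {
 *		struct intel_wakeref wakeref;
 *	};
 *
 *	static void foo_init_wakeref(struct foo *foo,
 *				     struct drm_i915_private *i915)
 *	{
 *		intel_wakeref_init(&foo->wakeref, i915, &foo_wakeref_ops);
 *	}
 */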

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the intel_wakeref_ops->get()
 * callback underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->get() is allowed to fail, in which case
 * the runtime-pm wakeref is released, the acquisition unwound, and the
 * error reported to the caller.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
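
/*
 * Example: the typical acquire/use/release pattern (an illustrative
 * sketch; foo_do_work() is hypothetical). Acquisition may fail, so
 * the return value must be checked:
 *
 *	static int foo_exercise(struct foo *foo)
 *	{
 *		int err;
 *
 *		err = intel_wakeref_get(&foo->wakeref);
 *		if (err)
 *			return err;
 *
 *		foo_do_work(foo);
 *
 *		intel_wakeref_put(&foo->wakeref);
 *		return 0;
 *	}
 */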

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter; this is only valid if the wakeref is
 * already held by the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref if it is already active
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active. This never sleeps and so may be used from atomic context.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
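
/*
 * Example: opportunistic use of already-awake hardware (an
 * illustrative sketch; foo_flush_hw() is hypothetical). If the
 * wakeref is idle, the work is simply skipped rather than waking
 * the device:
 *
 *	if (intel_wakeref_get_if_active(&foo->wakeref)) {
 *		foo_flush_hw(foo);
 *		intel_wakeref_put_async(&foo->wakeref);
 *	}
 */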

enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the intel_wakeref_ops->put()
 * callback is called underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->put() is allowed to fail, in which case the
 * runtime-pm wakeref is retained.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
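
/*
 * Example: releasing from a context that may not sleep (an
 * illustrative sketch). intel_wakeref_put_async() punts the final
 * release to a worker, while intel_wakeref_put_delay() additionally
 * keeps the wakeref alive for the given number of jiffies in
 * anticipation of imminent reuse:
 *
 *	intel_wakeref_put_async(&foo->wakeref);
 *
 *	intel_wakeref_put_delay(&foo->wakeref, msecs_to_jiffies(100));
 */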

static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running underneath @wf->mutex on
 * another CPU) and any pending delayed work have completed.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
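
/*
 * Example: taking a stable snapshot of the wakeref status (an
 * illustrative sketch; foo_dump_state() is hypothetical). On its own,
 * intel_wakeref_is_active() is only an instantaneous hint; hold the
 * mutex to keep the wakeref from being acquired or released while it
 * is inspected:
 *
 *	intel_wakeref_lock(&foo->wakeref);
 *	if (intel_wakeref_is_active(&foo->wakeref))
 *		foo_dump_state(foo);
 *	intel_wakeref_unlock(&foo->wakeref);
 */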

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 *
 * Revive the wakeref count so that the park is deferred and will be
 * retried on a later release. Only valid from underneath the
 * intel_wakeref_ops->put() callback, with @wf->mutex held and the
 * count already at zero.
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}
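
/*
 * Example: deferring the park from inside an intel_wakeref_ops->put()
 * callback (an illustrative sketch; foo_hw_is_idle() and the later
 * retry are hypothetical, and this is a variant of the earlier
 * foo_put()). The callback runs with wf->mutex held and the count at
 * zero; reviving the count and returning an error keeps both the
 * wakeref and the runtime-pm reference alive until the release is
 * retried:
 *
 *	static int foo_put(struct intel_wakeref *wf)
 *	{
 *		struct foo *foo = container_of(wf, struct foo, wakeref);
 *
 *		if (!foo_hw_is_idle(foo)) {
 *			__intel_wakeref_defer_park(wf);
 *			return -EBUSY;
 *		}
 *
 *		foo_hw_park(foo);
 *		return 0;
 *	}
 */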

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, or a negative error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

struct intel_wakeref_auto {
	struct drm_i915_private *i915;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct drm_i915_private *i915);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
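
/*
 * Example: holding the device awake for a grace period after user
 * activity (an illustrative sketch; foo and the 250ms grace period
 * are hypothetical). Each call with a nonzero timeout (re)arms the
 * timer, and the wakeref is dropped once the timeout expires with no
 * further activity:
 *
 *	intel_wakeref_auto_init(&foo->wakeref_auto, i915);
 *
 *	On each user access:
 *	intel_wakeref_auto(&foo->wakeref_auto, msecs_to_jiffies(250));
 *
 *	On teardown:
 *	intel_wakeref_auto_fini(&foo->wakeref_auto);
 */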

#endif /* INTEL_WAKEREF_H */