/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct drm_printer;
struct intel_runtime_pm;
struct intel_wakeref;

typedef struct ref_tracker *intel_wakeref_t;

#define INTEL_REFTRACK_DEAD_COUNT 16
#define INTEL_REFTRACK_PRINT_LIMIT 16

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct drm_i915_private *i915;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
	struct ref_tracker_dir debug;
#endif
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct drm_i915_private *i915,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key,
			  const char *name);
#define intel_wakeref_init(wf, i915, ops, name) do {			\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (i915), (ops), &__key, name);	\
} while (0)
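
/*
 * Example (illustrative sketch only; struct foo, foo_power_up() and
 * foo_power_down() are hypothetical, not part of the driver): a component
 * embeds a wakeref and supplies get/put callbacks that run when the first
 * reference is taken and when the last one is dropped:
 *
 *	static int foo_get(struct intel_wakeref *wf)
 *	{
 *		return foo_power_up(container_of(wf, struct foo, wakeref));
 *	}
 *
 *	static int foo_put(struct intel_wakeref *wf)
 *	{
 *		return foo_power_down(container_of(wf, struct foo, wakeref));
 *	}
 *
 *	static const struct intel_wakeref_ops foo_wakeref_ops = {
 *		.get = foo_get,
 *		.put = foo_put,
 *	};
 *
 *	intel_wakeref_init(&foo->wakeref, i915, &foo_wakeref_ops, "foo");
 */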

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime-pm wakeref and then call intel_wakeref_ops->get()
 * underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->get() is allowed to fail, in which case
 * the runtime-pm wakeref will be released, the acquisition unwound,
 * and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
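
/*
 * Typical usage (sketch; do_work() is a hypothetical stand-in): bracket
 * access to the hardware with a get/put pair, bailing out if the initial
 * power-up fails:
 *
 *	int err;
 *
 *	err = intel_wakeref_get(&foo->wakeref);
 *	if (err)
 *		return err;
 *
 *	do_work(foo);
 *
 *	intel_wakeref_put(&foo->wakeref);
 */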

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
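
/*
 * Sketch of opportunistic use (flush_if_awake() is hypothetical): take the
 * wakeref only if the device is already awake, so a background task never
 * forces a wakeup just to do optional housekeeping:
 *
 *	if (intel_wakeref_get_if_active(&foo->wakeref)) {
 *		flush_if_awake(foo);
 *		intel_wakeref_put(&foo->wakeref);
 *	}
 */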

enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime-pm wakeref will be released after the intel_wakeref_ops->put()
 * callback is called underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->put() is allowed to fail, in which case the
 * runtime-pm wakeref is retained.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
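
/*
 * The flags word passed to __intel_wakeref_put() packs two things into one
 * unsigned long: bit 0 (INTEL_WAKEREF_PUT_ASYNC) selects asynchronous
 * release, and the remaining upper bits (INTEL_WAKEREF_PUT_DELAY) carry an
 * optional delay for the deferred put (consumed as a workqueue delay, so
 * in jiffies). For example (sketch):
 *
 *	intel_wakeref_put_delay(&foo->wakeref, HZ);
 *
 * queues the final release to run roughly a second later, instead of
 * dropping the last reference synchronously in the caller's context.
 */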

static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it from being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running under @wf->mutex, possibly on
 * another CPU) has completed.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
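
/*
 * Sketch (illustrative): the unlocked query above is only a racy snapshot;
 * to sample the state stably against the first-get/last-put transitions,
 * take the wakeref mutex around it:
 *
 *	intel_wakeref_lock(wf);
 *	active = intel_wakeref_is_active(wf);
 *	intel_wakeref_unlock(wf);
 */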

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
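
/*
 * Teardown sketch (foo_fini() and free_foo() are hypothetical): once no new
 * users can acquire the wakeref, wait for any earlier asynchronous release
 * to finish before tearing down the surrounding object:
 *
 *	static void foo_fini(struct foo *foo)
 *	{
 *		if (intel_wakeref_wait_for_idle(&foo->wakeref))
 *			return;
 *
 *		free_foo(foo);
 *	}
 */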

#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT)

static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
{
	struct ref_tracker *user = NULL;

	ref_tracker_alloc(dir, &user, GFP_NOWAIT);

	return user ?: INTEL_WAKEREF_DEF;
}

static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
					  intel_wakeref_t wakeref)
{
	if (wakeref == INTEL_WAKEREF_DEF)
		wakeref = NULL;

	if (WARN_ON(IS_ERR(wakeref)))
		return;

	ref_tracker_free(dir, &wakeref);
}

void intel_ref_tracker_show(struct ref_tracker_dir *dir,
			    struct drm_printer *p);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)

static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
{
	return intel_ref_tracker_alloc(&wf->debug);
}

static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
					 intel_wakeref_t handle)
{
	intel_ref_tracker_free(&wf->debug, handle);
}

#else

static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
{
	return INTEL_WAKEREF_DEF;
}

static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
					 intel_wakeref_t handle)
{
}

#endif
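
/*
 * Sketch of leak tracking (illustrative only): a caller records the handle
 * returned by intel_wakeref_track() alongside its reference and hands it
 * back on release, so CONFIG_DRM_I915_DEBUG_WAKEREF builds can attribute
 * a leaked wakeref to the call site that took it:
 *
 *	intel_wakeref_t handle;
 *
 *	handle = intel_wakeref_track(wf);
 *	...
 *	intel_wakeref_untrack(wf, handle);
 *
 * With the config option disabled, tracking collapses to a no-op that
 * returns INTEL_WAKEREF_DEF.
 */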

struct intel_wakeref_auto {
	struct drm_i915_private *i915;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct drm_i915_private *i915);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
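
/*
 * Usage sketch (the five-second figure and the foo->wakeref_auto field are
 * illustrative, not driver defaults): after touching hardware that is
 * expensive to re-wake, extend the autosuspend window so back-to-back
 * accesses do not thrash runtime suspend/resume:
 *
 *	intel_wakeref_auto_init(&foo->wakeref_auto, i915);
 *	...
 *	intel_wakeref_auto(&foo->wakeref_auto, 5 * HZ);
 *	...
 *	intel_wakeref_auto_fini(&foo->wakeref_auto);
 */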

#endif /* INTEL_WAKEREF_H */