/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
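 *
 * As a rough sketch of the resulting usage pattern (an illustrative example,
 * not lifted from a real caller; i915 stands for a pointer to the driver's
 * struct drm_i915_private)::
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *	... hardware may be accessed here ...
 *	intel_runtime_pm_put(&i915->runtime_pm, wakeref);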
 */

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
	stack_depot_init();
}

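/*
 * Capture the acquirer's call stack and remember it as the wakeref cookie.
 * A cookie of -1 means "untracked" (tracking disabled, or no memory for the
 * stack depot) and is silently ignored by untrack_intel_runtime_pm_wakeref().
 */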
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (rpm->no_wakeref_tracking)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (drm_WARN(&i915->drm, !found,
		     "Unmatched wakeref (tracking %lu), count %u\n",
		     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}

static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

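	/*
	 * Take a consistent snapshot of the tracking state under the lock;
	 * if the local copy of the owners array turns out to be too small,
	 * grow it outside the lock and retry, rather than allocating with
	 * the irq-off spinlock held.
	 */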
	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

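/*
 * rpm->wakeref_count packs both kinds of reference into a single atomic:
 * raw wakerefs bump the count by one, while wakelocks additionally add
 * INTEL_RPM_WAKELOCK_BIAS, letting the assert helpers distinguish "device
 * awake" from "device awake with a wakelock held".
 */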
static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	drm_WARN_ONCE(&i915->drm, ret < 0,
		      "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}

/**
 * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 * @ignore_usecount: get a ref even if dev->power.usage_count is 0
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already active and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_active() report failure.
 *
 * If @ignore_usecount is true, a reference will be acquired even if there is no
 * user requiring the device to be powered up (dev->power.usage_count == 0).
 * If the function returns false in this case then it's guaranteed that the
 * device's runtime suspend hook has been called already or that it will be
 * called (and hence it's also guaranteed that the device's runtime resume
 * hook will be called eventually).
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
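 * A rough sketch of the intended calling pattern, via the
 * intel_runtime_pm_get_if_in_use() wrapper defined below (illustrative only,
 * not lifted from a real caller)::
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_if_in_use(rpm);
 *	if (wakeref) {
 *		... the HW is known to be powered up here ...
 *		intel_runtime_pm_put(rpm, wakeref);
 *	}
 *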
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
							bool ignore_usecount)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * If runtime PM is disabled by the RPM core and we get an
		 * -EINVAL return value, we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, false);
}

intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, true);
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended, skipping the driver's suspend handlers,
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		drm_WARN(&i915->drm, ret < 0,
			 "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * FIXME: Temporary hammer to keep autosuspend disabled on lmem
	 * supported platforms. As per PCIe spec 5.3.1.4.1, all iomem
	 * read/write requests over a PCIe function are unsupported while the
	 * PCIe endpoint function is in D3. Keep i915 autosuspend control 'on'
	 * till we fix all known issues with lmem access in D3.
	 */
	if (!IS_DGFX(i915))
		pm_runtime_allow(kdev);

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
		 "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int count = atomic_read(&rpm->wakeref_count);

	intel_wakeref_auto_fini(&rpm->userfault_wakeref);

	drm_WARN(&i915->drm, count,
		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
		 intel_rpm_raw_wakeref_count(count),
		 intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
		container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);
	atomic_set(&rpm->wakeref_count, 0);

	init_intel_runtime_pm_wakeref(rpm);
	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
	spin_lock_init(&rpm->lmem_userfault_lock);
	intel_wakeref_auto_init(&rpm->userfault_wakeref, i915);
}