/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_RUNTIME_PM_H__
#define __INTEL_RUNTIME_PM_H__

#include <linux/pm_runtime.h>
#include <linux/types.h>

#include "intel_wakeref.h"

#include "i915_utils.h"

struct device;
struct drm_i915_private;
struct drm_printer;

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain (see the usage sketch below the struct).
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if the refcount stays at zero for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files in sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages
 * in case it does.
 *
 * For more, read Documentation/power/runtime_pm.rst.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.dev */
	bool available;
	bool irqs_enabled;
	bool no_wakeref_tracking;

	/*
	 * Protects access to the lmem userfault list.
	 * Outside of the runtime suspend path, access to
	 * @lmem_userfault_list always requires first grabbing the runtime
	 * pm wakeref, to ensure we can't race against runtime suspend.
	 * Once we have that we also need to grab @lmem_userfault_lock,
	 * at which point we have exclusive access.
	 * The runtime suspend path is special since it doesn't really hold
	 * any locks, but instead has exclusive access by virtue of all other
	 * accesses requiring holding the runtime pm wakeref.
	 */
	spinlock_t lmem_userfault_lock;

	/*
	 * Keep a list of the userfaulted gem objects which need to release
	 * their mmap mappings in the runtime suspend path.
	 */
	struct list_head lmem_userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
	struct intel_wakeref_auto userfault_wakeref;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove the corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};
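
/*
 * A minimal usage sketch (editorial, not kernel-doc): example_read_reg()
 * is hypothetical and the register read is arbitrary, and we assume the
 * usual i915->runtime_pm member. The point is that every hardware access
 * is bracketed by a get/put pair, and that the cookie returned by
 * intel_runtime_pm_get() is handed back to intel_runtime_pm_put() so the
 * debug tracking can match the pair up.
 *
 *	static u32 example_read_reg(struct drm_i915_private *i915)
 *	{
 *		struct intel_runtime_pm *rpm = &i915->runtime_pm;
 *		intel_wakeref_t wakeref;
 *		u32 val;
 *
 *		wakeref = intel_runtime_pm_get(rpm);
 *		val = intel_uncore_read(&i915->uncore, GEN6_RC_STATE);
 *		intel_runtime_pm_put(rpm, wakeref);
 *
 *		return val;
 *	}
 */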

#define BITS_PER_WAKEREF \
	BITS_PER_TYPE(typeof_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)

static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}
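
/*
 * A worked example of the wakeref_count encoding (editorial), assuming a
 * 32-bit atomic_t: INTEL_RPM_WAKELOCK_SHIFT is then 16, so the low 16 bits
 * count raw wakerefs and the high 16 bits count wakelocks. A raw reference
 * bumps the count by 1, while a full wakelock reference bumps it by
 * INTEL_RPM_WAKELOCK_BIAS + 1 so that it shows up in both halves:
 *
 *	one raw wakeref:	wakeref_count == 0x00000001
 *				raw count == 1, wakelock count == 0
 *	one full wakelock:	wakeref_count == 0x00010001
 *				raw count == 1, wakelock count == 1
 *
 * This also explains why disable_rpm_wakeref_asserts() below adds
 * INTEL_RPM_WAKELOCK_BIAS + 1: it makes both halves non-zero, so both
 * assert helpers pass while the asserts are "disabled".
 */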

static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(pm_runtime_suspended(rpm->kdev),
		  "Device suspended during HW access\n");
}

static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}

/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables the asserts that check whether we hold an RPM
 * wakelock reference, while keeping the device-not-suspended checks still
 * enabled. It's meant to be used only in special circumstances where our
 * rule about the wakelock refcount wrt. the device power state doesn't hold.
 * According to this rule, at any point where we access the HW or want to
 * keep the HW in an active state we must hold an RPM wakelock reference
 * acquired via one of the intel_runtime_pm_get() helpers. Currently there
 * are a few special spots where this rule doesn't hold: the IRQ and
 * suspend/resume handlers, the forcewake release timer, and the GPU RPS and
 * hangcheck works. All other users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances, otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}
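
/*
 * A minimal sketch of the intended pattern (the handler body is
 * hypothetical): one of the special-case paths, here an IRQ handler,
 * brackets its hardware access with a symmetric disable/enable pair.
 * Everything between the two calls runs with the wakelock assert
 * suppressed, while the device-not-suspended check stays active.
 *
 *	static irqreturn_t example_irq_handler(int irq, void *arg)
 *	{
 *		struct drm_i915_private *i915 = arg;
 *		struct intel_runtime_pm *rpm = &i915->runtime_pm;
 *
 *		disable_rpm_wakeref_asserts(rpm);
 *
 *		... handle the interrupt, touching the hardware ...
 *
 *		enable_rpm_wakeref_asserts(rpm);
 *
 *		return IRQ_HANDLED;
 *	}
 */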

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);

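/*
 * Acquisition helpers. This summary is editorial; the authoritative
 * kernel-doc lives with the definitions in intel_runtime_pm.c.
 * intel_runtime_pm_get() resumes the device if needed and always returns a
 * wakeref cookie to pass to intel_runtime_pm_put(). The _if_in_use and
 * _if_active variants are conditional: they only take a reference while the
 * device is already awake, and otherwise return a cookie that evaluates as
 * false, which is exactly what the with_intel_runtime_pm_if_* helpers below
 * test.
 */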
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_in_use(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_active(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
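
/*
 * A minimal sketch of the scoped helpers above; example_work() is
 * hypothetical. The body runs exactly once with the wakeref held (or, for
 * the _if_in_use/_if_active variants, is skipped entirely when no wakeref
 * could be acquired) and the put happens automatically when the body is
 * left. Note that because these are for-loop based, breaking out of the
 * body would skip the automatic put.
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 *		example_work(i915);
 */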

void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
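
/*
 * A note on the raw variants, derived from the count encoding above: a raw
 * wakeref bumps only the low half of wakeref_count, so it satisfies
 * assert_rpm_raw_wakeref_held() but not assert_rpm_wakelock_held(). It
 * therefore keeps the device awake without asserting that HW access is in
 * progress. Each get must be paired with a put using the returned cookie:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_raw(rpm);
 *	... keep the device powered, no MMIO of our own ...
 *	intel_runtime_pm_put_raw(rpm, wakeref);
 */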

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
						  struct drm_printer *p)
{
}
#endif

#endif /* __INTEL_RUNTIME_PM_H__ */