1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * pm_runtime.h - Device run-time power management helper functions. |
4 | * |
5 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl> |
6 | */ |
7 | |
8 | #ifndef _LINUX_PM_RUNTIME_H |
9 | #define _LINUX_PM_RUNTIME_H |
10 | |
11 | #include <linux/device.h> |
12 | #include <linux/notifier.h> |
13 | #include <linux/pm.h> |
14 | |
15 | #include <linux/jiffies.h> |
16 | |
17 | /* Runtime PM flag argument bits */ |
18 | #define RPM_ASYNC 0x01 /* Request is asynchronous */ |
19 | #define RPM_NOWAIT 0x02 /* Don't wait for concurrent |
20 | state change */ |
21 | #define RPM_GET_PUT 0x04 /* Increment/decrement the |
22 | usage_count */ |
23 | #define RPM_AUTO 0x08 /* Use autosuspend_delay */ |
24 | |
25 | /* |
26 | * Use this for defining a set of PM operations to be used in all situations |
27 | * (system suspend, hibernation or runtime PM). |
28 | * |
29 | * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS() |
30 | * macro, which uses the provided callbacks for both runtime PM and system |
31 | * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend() |
32 | * and pm_runtime_force_resume() for its system sleep callbacks. |
33 | * |
34 | * If the underlying dev_pm_ops struct symbol has to be exported, use |
35 | * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead. |
36 | */ |
37 | #define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ |
38 | _DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \ |
39 | pm_runtime_force_resume, suspend_fn, \ |
40 | resume_fn, idle_fn) |
41 | |
42 | #define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ |
43 | EXPORT_DEV_PM_OPS(name) = { \ |
44 | RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ |
45 | } |
46 | #define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ |
47 | EXPORT_GPL_DEV_PM_OPS(name) = { \ |
48 | RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ |
49 | } |
50 | #define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \ |
51 | EXPORT_NS_DEV_PM_OPS(name, ns) = { \ |
52 | RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ |
53 | } |
54 | #define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \ |
55 | EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \ |
56 | RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ |
57 | } |
58 | |
59 | #ifdef CONFIG_PM |
60 | extern struct workqueue_struct *pm_wq; |
61 | |
62 | static inline bool queue_pm_work(struct work_struct *work) |
63 | { |
return queue_work(pm_wq, work);
65 | } |
66 | |
67 | extern int pm_generic_runtime_suspend(struct device *dev); |
68 | extern int pm_generic_runtime_resume(struct device *dev); |
69 | extern int pm_runtime_force_suspend(struct device *dev); |
70 | extern int pm_runtime_force_resume(struct device *dev); |
71 | |
72 | extern int __pm_runtime_idle(struct device *dev, int rpmflags); |
73 | extern int __pm_runtime_suspend(struct device *dev, int rpmflags); |
74 | extern int __pm_runtime_resume(struct device *dev, int rpmflags); |
75 | extern int pm_runtime_get_if_active(struct device *dev); |
76 | extern int pm_runtime_get_if_in_use(struct device *dev); |
77 | extern int pm_schedule_suspend(struct device *dev, unsigned int delay); |
78 | extern int __pm_runtime_set_status(struct device *dev, unsigned int status); |
79 | extern int pm_runtime_barrier(struct device *dev); |
80 | extern void pm_runtime_enable(struct device *dev); |
81 | extern void __pm_runtime_disable(struct device *dev, bool check_resume); |
82 | extern void pm_runtime_allow(struct device *dev); |
83 | extern void pm_runtime_forbid(struct device *dev); |
84 | extern void pm_runtime_no_callbacks(struct device *dev); |
85 | extern void pm_runtime_irq_safe(struct device *dev); |
86 | extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); |
87 | extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); |
88 | extern u64 pm_runtime_autosuspend_expiration(struct device *dev); |
89 | extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); |
90 | extern void pm_runtime_get_suppliers(struct device *dev); |
91 | extern void pm_runtime_put_suppliers(struct device *dev); |
92 | extern void pm_runtime_new_link(struct device *dev); |
93 | extern void pm_runtime_drop_link(struct device_link *link); |
94 | extern void pm_runtime_release_supplier(struct device_link *link); |
95 | |
96 | extern int devm_pm_runtime_enable(struct device *dev); |
97 | |
98 | /** |
99 | * pm_suspend_ignore_children - Set runtime PM behavior regarding children. |
100 | * @dev: Target device. |
101 | * @enable: Whether or not to ignore possible dependencies on children. |
102 | * |
103 | * The dependencies of @dev on its children will not be taken into account by |
104 | * the runtime PM framework going forward if @enable is %true, or they will |
105 | * be taken into account otherwise. |
106 | */ |
107 | static inline void pm_suspend_ignore_children(struct device *dev, bool enable) |
108 | { |
109 | dev->power.ignore_children = enable; |
110 | } |
111 | |
112 | /** |
113 | * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device. |
114 | * @dev: Target device. |
115 | */ |
116 | static inline void pm_runtime_get_noresume(struct device *dev) |
117 | { |
atomic_inc(&dev->power.usage_count);
119 | } |
120 | |
121 | /** |
122 | * pm_runtime_put_noidle - Drop runtime PM usage counter of a device. |
123 | * @dev: Target device. |
124 | * |
125 | * Decrement the runtime PM usage counter of @dev unless it is 0 already. |
126 | */ |
127 | static inline void pm_runtime_put_noidle(struct device *dev) |
128 | { |
atomic_add_unless(&dev->power.usage_count, -1, 0);
130 | } |
131 | |
132 | /** |
133 | * pm_runtime_suspended - Check whether or not a device is runtime-suspended. |
134 | * @dev: Target device. |
135 | * |
136 | * Return %true if runtime PM is enabled for @dev and its runtime PM status is |
137 | * %RPM_SUSPENDED, or %false otherwise. |
138 | * |
139 | * Note that the return value of this function can only be trusted if it is |
140 | * called under the runtime PM lock of @dev or under conditions in which |
141 | * runtime PM cannot be either disabled or enabled for @dev and its runtime PM |
142 | * status cannot change. |
143 | */ |
144 | static inline bool pm_runtime_suspended(struct device *dev) |
145 | { |
146 | return dev->power.runtime_status == RPM_SUSPENDED |
147 | && !dev->power.disable_depth; |
148 | } |
149 | |
150 | /** |
151 | * pm_runtime_active - Check whether or not a device is runtime-active. |
152 | * @dev: Target device. |
153 | * |
154 | * Return %true if runtime PM is disabled for @dev or its runtime PM status is |
155 | * %RPM_ACTIVE, or %false otherwise. |
156 | * |
157 | * Note that the return value of this function can only be trusted if it is |
158 | * called under the runtime PM lock of @dev or under conditions in which |
159 | * runtime PM cannot be either disabled or enabled for @dev and its runtime PM |
160 | * status cannot change. |
161 | */ |
162 | static inline bool pm_runtime_active(struct device *dev) |
163 | { |
164 | return dev->power.runtime_status == RPM_ACTIVE |
165 | || dev->power.disable_depth; |
166 | } |
167 | |
168 | /** |
169 | * pm_runtime_status_suspended - Check if runtime PM status is "suspended". |
170 | * @dev: Target device. |
171 | * |
172 | * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false |
173 | * otherwise, regardless of whether or not runtime PM has been enabled for @dev. |
174 | * |
175 | * Note that the return value of this function can only be trusted if it is |
176 | * called under the runtime PM lock of @dev or under conditions in which the |
177 | * runtime PM status of @dev cannot change. |
178 | */ |
179 | static inline bool pm_runtime_status_suspended(struct device *dev) |
180 | { |
181 | return dev->power.runtime_status == RPM_SUSPENDED; |
182 | } |
183 | |
184 | /** |
185 | * pm_runtime_enabled - Check if runtime PM is enabled. |
186 | * @dev: Target device. |
187 | * |
188 | * Return %true if runtime PM is enabled for @dev or %false otherwise. |
189 | * |
190 | * Note that the return value of this function can only be trusted if it is |
191 | * called under the runtime PM lock of @dev or under conditions in which |
192 | * runtime PM cannot be either disabled or enabled for @dev. |
193 | */ |
194 | static inline bool pm_runtime_enabled(struct device *dev) |
195 | { |
196 | return !dev->power.disable_depth; |
197 | } |
198 | |
199 | /** |
200 | * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present. |
201 | * @dev: Target device. |
202 | * |
203 | * Return %true if @dev is a special device without runtime PM callbacks or |
204 | * %false otherwise. |
205 | */ |
206 | static inline bool pm_runtime_has_no_callbacks(struct device *dev) |
207 | { |
208 | return dev->power.no_callbacks; |
209 | } |
210 | |
211 | /** |
212 | * pm_runtime_mark_last_busy - Update the last access time of a device. |
213 | * @dev: Target device. |
214 | * |
215 | * Update the last access time of @dev used by the runtime PM autosuspend |
216 | * mechanism to the current time as returned by ktime_get_mono_fast_ns(). |
217 | */ |
218 | static inline void pm_runtime_mark_last_busy(struct device *dev) |
219 | { |
220 | WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); |
221 | } |
222 | |
223 | /** |
224 | * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context. |
225 | * @dev: Target device. |
226 | * |
227 | * Return %true if @dev has been marked as an "IRQ-safe" device (with respect |
* to runtime PM), in which case its runtime PM callbacks can be expected to
229 | * work correctly when invoked from interrupt handlers. |
230 | */ |
231 | static inline bool pm_runtime_is_irq_safe(struct device *dev) |
232 | { |
233 | return dev->power.irq_safe; |
234 | } |
235 | |
236 | extern u64 pm_runtime_suspended_time(struct device *dev); |
237 | |
238 | #else /* !CONFIG_PM */ |
239 | |
240 | static inline bool queue_pm_work(struct work_struct *work) { return false; } |
241 | |
242 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } |
243 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } |
244 | static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } |
245 | static inline int pm_runtime_force_resume(struct device *dev) { return 0; } |
246 | |
247 | static inline int __pm_runtime_idle(struct device *dev, int rpmflags) |
248 | { |
249 | return -ENOSYS; |
250 | } |
251 | static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) |
252 | { |
253 | return -ENOSYS; |
254 | } |
255 | static inline int __pm_runtime_resume(struct device *dev, int rpmflags) |
256 | { |
257 | return 1; |
258 | } |
259 | static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) |
260 | { |
261 | return -ENOSYS; |
262 | } |
263 | static inline int pm_runtime_get_if_in_use(struct device *dev) |
264 | { |
265 | return -EINVAL; |
266 | } |
267 | static inline int pm_runtime_get_if_active(struct device *dev) |
268 | { |
269 | return -EINVAL; |
270 | } |
271 | static inline int __pm_runtime_set_status(struct device *dev, |
272 | unsigned int status) { return 0; } |
273 | static inline int pm_runtime_barrier(struct device *dev) { return 0; } |
274 | static inline void pm_runtime_enable(struct device *dev) {} |
275 | static inline void __pm_runtime_disable(struct device *dev, bool c) {} |
276 | static inline void pm_runtime_allow(struct device *dev) {} |
277 | static inline void pm_runtime_forbid(struct device *dev) {} |
278 | |
279 | static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } |
280 | |
281 | static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} |
282 | static inline void pm_runtime_get_noresume(struct device *dev) {} |
283 | static inline void pm_runtime_put_noidle(struct device *dev) {} |
284 | static inline bool pm_runtime_suspended(struct device *dev) { return false; } |
285 | static inline bool pm_runtime_active(struct device *dev) { return true; } |
286 | static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } |
287 | static inline bool pm_runtime_enabled(struct device *dev) { return false; } |
288 | |
289 | static inline void pm_runtime_no_callbacks(struct device *dev) {} |
290 | static inline void pm_runtime_irq_safe(struct device *dev) {} |
291 | static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } |
292 | |
293 | static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; } |
294 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} |
295 | static inline void __pm_runtime_use_autosuspend(struct device *dev, |
296 | bool use) {} |
297 | static inline void pm_runtime_set_autosuspend_delay(struct device *dev, |
298 | int delay) {} |
299 | static inline u64 pm_runtime_autosuspend_expiration( |
300 | struct device *dev) { return 0; } |
301 | static inline void pm_runtime_set_memalloc_noio(struct device *dev, |
302 | bool enable){} |
303 | static inline void pm_runtime_get_suppliers(struct device *dev) {} |
304 | static inline void pm_runtime_put_suppliers(struct device *dev) {} |
305 | static inline void pm_runtime_new_link(struct device *dev) {} |
306 | static inline void pm_runtime_drop_link(struct device_link *link) {} |
307 | static inline void pm_runtime_release_supplier(struct device_link *link) {} |
308 | |
309 | #endif /* !CONFIG_PM */ |
310 | |
311 | /** |
312 | * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it. |
313 | * @dev: Target device. |
314 | * |
315 | * Invoke the "idle check" callback of @dev and, depending on its return value, |
316 | * set up autosuspend of @dev or suspend it (depending on whether or not |
317 | * autosuspend has been enabled for it). |
318 | */ |
319 | static inline int pm_runtime_idle(struct device *dev) |
320 | { |
return __pm_runtime_idle(dev, 0);
322 | } |
323 | |
324 | /** |
325 | * pm_runtime_suspend - Suspend a device synchronously. |
326 | * @dev: Target device. |
327 | */ |
328 | static inline int pm_runtime_suspend(struct device *dev) |
329 | { |
return __pm_runtime_suspend(dev, 0);
331 | } |
332 | |
333 | /** |
334 | * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it. |
335 | * @dev: Target device. |
336 | * |
337 | * Set up autosuspend of @dev or suspend it (depending on whether or not |
338 | * autosuspend is enabled for it) without engaging its "idle check" callback. |
339 | */ |
340 | static inline int pm_runtime_autosuspend(struct device *dev) |
341 | { |
342 | return __pm_runtime_suspend(dev, RPM_AUTO); |
343 | } |
344 | |
345 | /** |
346 | * pm_runtime_resume - Resume a device synchronously. |
347 | * @dev: Target device. |
348 | */ |
349 | static inline int pm_runtime_resume(struct device *dev) |
350 | { |
return __pm_runtime_resume(dev, 0);
352 | } |
353 | |
354 | /** |
355 | * pm_request_idle - Queue up "idle check" execution for a device. |
356 | * @dev: Target device. |
357 | * |
358 | * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev |
359 | * asynchronously. |
360 | */ |
361 | static inline int pm_request_idle(struct device *dev) |
362 | { |
363 | return __pm_runtime_idle(dev, RPM_ASYNC); |
364 | } |
365 | |
366 | /** |
367 | * pm_request_resume - Queue up runtime-resume of a device. |
368 | * @dev: Target device. |
369 | */ |
370 | static inline int pm_request_resume(struct device *dev) |
371 | { |
372 | return __pm_runtime_resume(dev, RPM_ASYNC); |
373 | } |
374 | |
375 | /** |
376 | * pm_request_autosuspend - Queue up autosuspend of a device. |
377 | * @dev: Target device. |
378 | * |
* Queue up a work item to run an equivalent of pm_runtime_autosuspend() for
380 | * asynchronously. |
381 | */ |
382 | static inline int pm_request_autosuspend(struct device *dev) |
383 | { |
384 | return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); |
385 | } |
386 | |
387 | /** |
388 | * pm_runtime_get - Bump up usage counter and queue up resume of a device. |
389 | * @dev: Target device. |
390 | * |
391 | * Bump up the runtime PM usage counter of @dev and queue up a work item to |
392 | * carry out runtime-resume of it. |
393 | */ |
394 | static inline int pm_runtime_get(struct device *dev) |
395 | { |
396 | return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); |
397 | } |
398 | |
399 | /** |
400 | * pm_runtime_get_sync - Bump up usage counter of a device and resume it. |
401 | * @dev: Target device. |
402 | * |
403 | * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of |
404 | * it synchronously. |
405 | * |
406 | * The possible return values of this function are the same as for |
407 | * pm_runtime_resume() and the runtime PM usage counter of @dev remains |
408 | * incremented in all cases, even if it returns an error code. |
409 | * Consider using pm_runtime_resume_and_get() instead of it, especially |
410 | * if its return value is checked by the caller, as this is likely to result |
411 | * in cleaner code. |
412 | */ |
413 | static inline int pm_runtime_get_sync(struct device *dev) |
414 | { |
415 | return __pm_runtime_resume(dev, RPM_GET_PUT); |
416 | } |
417 | |
418 | /** |
419 | * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it. |
420 | * @dev: Target device. |
421 | * |
422 | * Resume @dev synchronously and if that is successful, increment its runtime |
423 | * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been |
424 | * incremented or a negative error code otherwise. |
425 | */ |
426 | static inline int pm_runtime_resume_and_get(struct device *dev) |
427 | { |
428 | int ret; |
429 | |
430 | ret = __pm_runtime_resume(dev, RPM_GET_PUT); |
431 | if (ret < 0) { |
432 | pm_runtime_put_noidle(dev); |
433 | return ret; |
434 | } |
435 | |
436 | return 0; |
437 | } |
438 | |
439 | /** |
440 | * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0. |
441 | * @dev: Target device. |
442 | * |
443 | * Decrement the runtime PM usage counter of @dev and if it turns out to be |
444 | * equal to 0, queue up a work item for @dev like in pm_request_idle(). |
445 | */ |
446 | static inline int pm_runtime_put(struct device *dev) |
447 | { |
448 | return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); |
449 | } |
450 | |
451 | /** |
452 | * __pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0. |
453 | * @dev: Target device. |
454 | * |
455 | * Decrement the runtime PM usage counter of @dev and if it turns out to be |
456 | * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). |
457 | */ |
458 | static inline int __pm_runtime_put_autosuspend(struct device *dev) |
459 | { |
460 | return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); |
461 | } |
462 | |
463 | /** |
464 | * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0. |
465 | * @dev: Target device. |
466 | * |
467 | * Decrement the runtime PM usage counter of @dev and if it turns out to be |
468 | * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). |
469 | */ |
470 | static inline int pm_runtime_put_autosuspend(struct device *dev) |
471 | { |
472 | return __pm_runtime_suspend(dev, |
473 | RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); |
474 | } |
475 | |
476 | /** |
477 | * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0. |
478 | * @dev: Target device. |
479 | * |
480 | * Decrement the runtime PM usage counter of @dev and if it turns out to be |
481 | * equal to 0, invoke the "idle check" callback of @dev and, depending on its |
482 | * return value, set up autosuspend of @dev or suspend it (depending on whether |
483 | * or not autosuspend has been enabled for it). |
484 | * |
485 | * The possible return values of this function are the same as for |
486 | * pm_runtime_idle() and the runtime PM usage counter of @dev remains |
487 | * decremented in all cases, even if it returns an error code. |
488 | */ |
489 | static inline int pm_runtime_put_sync(struct device *dev) |
490 | { |
491 | return __pm_runtime_idle(dev, RPM_GET_PUT); |
492 | } |
493 | |
494 | /** |
495 | * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0. |
496 | * @dev: Target device. |
497 | * |
498 | * Decrement the runtime PM usage counter of @dev and if it turns out to be |
499 | * equal to 0, carry out runtime-suspend of @dev synchronously. |
500 | * |
501 | * The possible return values of this function are the same as for |
502 | * pm_runtime_suspend() and the runtime PM usage counter of @dev remains |
503 | * decremented in all cases, even if it returns an error code. |
504 | */ |
505 | static inline int pm_runtime_put_sync_suspend(struct device *dev) |
506 | { |
507 | return __pm_runtime_suspend(dev, RPM_GET_PUT); |
508 | } |
509 | |
510 | /** |
511 | * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0. |
512 | * @dev: Target device. |
513 | * |
514 | * Decrement the runtime PM usage counter of @dev and if it turns out to be |
515 | * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending |
516 | * on whether or not autosuspend has been enabled for it). |
517 | * |
518 | * The possible return values of this function are the same as for |
519 | * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains |
520 | * decremented in all cases, even if it returns an error code. |
521 | */ |
522 | static inline int pm_runtime_put_sync_autosuspend(struct device *dev) |
523 | { |
524 | return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); |
525 | } |
526 | |
527 | /** |
528 | * pm_runtime_set_active - Set runtime PM status to "active". |
529 | * @dev: Target device. |
530 | * |
531 | * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies |
532 | * of it will be taken into account. |
533 | * |
534 | * It is not valid to call this function for devices with runtime PM enabled. |
535 | */ |
536 | static inline int pm_runtime_set_active(struct device *dev) |
537 | { |
return __pm_runtime_set_status(dev, RPM_ACTIVE);
539 | } |
540 | |
541 | /** |
542 | * pm_runtime_set_suspended - Set runtime PM status to "suspended". |
543 | * @dev: Target device. |
544 | * |
545 | * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that |
546 | * dependencies of it will be taken into account. |
547 | * |
548 | * It is not valid to call this function for devices with runtime PM enabled. |
549 | */ |
550 | static inline int pm_runtime_set_suspended(struct device *dev) |
551 | { |
return __pm_runtime_set_status(dev, RPM_SUSPENDED);
553 | } |
554 | |
555 | /** |
556 | * pm_runtime_disable - Disable runtime PM for a device. |
557 | * @dev: Target device. |
558 | * |
559 | * Prevent the runtime PM framework from working with @dev (by incrementing its |
560 | * "blocking" counter). |
561 | * |
562 | * For each invocation of this function for @dev there must be a matching |
563 | * pm_runtime_enable() call in order for runtime PM to be enabled for it. |
564 | */ |
565 | static inline void pm_runtime_disable(struct device *dev) |
566 | { |
__pm_runtime_disable(dev, true);
568 | } |
569 | |
570 | /** |
571 | * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device. |
572 | * @dev: Target device. |
573 | * |
574 | * Allow the runtime PM autosuspend mechanism to be used for @dev whenever |
575 | * requested (or "autosuspend" will be handled as direct runtime-suspend for |
576 | * it). |
577 | * |
578 | * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend() |
579 | * at driver exit time unless your driver initially enabled pm_runtime |
580 | * with devm_pm_runtime_enable() (which handles it for you). |
581 | */ |
582 | static inline void pm_runtime_use_autosuspend(struct device *dev) |
583 | { |
__pm_runtime_use_autosuspend(dev, true);
585 | } |
586 | |
587 | /** |
588 | * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used. |
589 | * @dev: Target device. |
590 | * |
591 | * Prevent the runtime PM autosuspend mechanism from being used for @dev which |
592 | * means that "autosuspend" will be handled as direct runtime-suspend for it |
593 | * going forward. |
594 | */ |
595 | static inline void pm_runtime_dont_use_autosuspend(struct device *dev) |
596 | { |
__pm_runtime_use_autosuspend(dev, false);
598 | } |
599 | |
600 | #endif |
601 | |