// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>

#ifdef CONFIG_PM
/**
 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_suspend(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

/**
 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
 * @dev: Device to resume.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_resume(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
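
/*
 * Illustrative sketch (not taken from any particular subsystem; the "foo"
 * name is hypothetical): a bus type or class whose runtime PM consists of
 * nothing but invoking the driver's own callbacks can point its dev_pm_ops
 * directly at the two generic helpers above, e.g.:
 *
 *	static const struct dev_pm_ops foo_bus_pm_ops = {
 *		.runtime_suspend = pm_generic_runtime_suspend,
 *		.runtime_resume = pm_generic_runtime_resume,
 *	};
 */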
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
/**
 * pm_generic_prepare - Generic routine preparing a device for power transition.
 * @dev: Device to prepare.
 *
 * Prepare a device for a system-wide power transition.
 */
int pm_generic_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}

/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

/**
 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
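
/*
 * Illustrative sketch ("foo" is a hypothetical name): the three suspend-phase
 * helpers above plug into the matching dev_pm_ops fields, which the PM core
 * runs in the order ->suspend(), ->suspend_late(), ->suspend_noirq() during
 * a system-wide transition to a sleep state, e.g.:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = pm_generic_suspend,
 *		.suspend_late = pm_generic_suspend_late,
 *		.suspend_noirq = pm_generic_suspend_noirq,
 *	};
 */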

/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

/**
 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_late);

/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);

/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

/**
 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_late(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
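
/*
 * Illustrative sketch ("foo" is a hypothetical name): the freeze* and
 * poweroff* helpers above are the hibernation counterparts of the suspend*
 * callbacks; the freeze phases run before the hibernation image is created
 * and the poweroff phases run before the system is powered down, e.g.:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.freeze = pm_generic_freeze,
 *		.freeze_noirq = pm_generic_freeze_noirq,
 *		.poweroff = pm_generic_poweroff,
 *		.poweroff_noirq = pm_generic_poweroff_noirq,
 *	};
 */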

/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

/**
 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_early);

/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);

/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

/**
 * pm_generic_resume_early - Generic resume_early callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);

/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);

/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

/**
 * pm_generic_restore_early - Generic restore_early callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_early(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);

/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);

/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);
}
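
/*
 * Illustrative sketch ("foo" is a hypothetical name): a subsystem can combine
 * the helpers in this file into a full set of system sleep operations; each
 * one simply forwards to the driver's callback of the same name when the
 * driver provides one and returns 0 (or does nothing) otherwise:
 *
 *	static const struct dev_pm_ops foo_subsys_pm_ops = {
 *		.prepare = pm_generic_prepare,
 *		.suspend = pm_generic_suspend,
 *		.resume = pm_generic_resume,
 *		.freeze = pm_generic_freeze,
 *		.thaw = pm_generic_thaw,
 *		.poweroff = pm_generic_poweroff,
 *		.restore = pm_generic_restore,
 *		.complete = pm_generic_complete,
 *	};
 */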
#endif /* CONFIG_PM_SLEEP */