// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

/* OPP ID allocator */
static DEFINE_XARRAY_ALLOC1(opp_configs);

static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	bool found = false;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev) {
			found = true;
			break;
		}

	mutex_unlock(&opp_table->lock);
	return found;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}
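
/*
 * Example (illustrative only, not part of this file): the lookup/put pairing
 * expected of callers, here via the public wrapper around _find_opp_table().
 * The dev pointer is assumed to come from a hypothetical driver's probe():
 *
 *	struct opp_table *opp_table;
 *
 *	opp_table = dev_pm_opp_get_opp_table(dev);
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *
 *	... use the table ...
 *
 *	dev_pm_opp_put_opp_table(opp_table);
 */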

/*
 * Returns true if multiple clocks aren't there, else returns false with WARN.
 *
 * We don't force clk_count == 1 here as there are users who don't have a clock
 * representation in the OPP table and manage the clock configuration themselves
 * in a platform-specific way.
 */
static bool assert_single_clk(struct opp_table *opp_table)
{
	return !WARN_ON(opp_table->clk_count > 1);
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
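
/*
 * Example (illustrative only): reading the voltage of a specific OPP. The
 * 1 GHz rate is a made-up value for a hypothetical single-supply device:
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long u_volt;
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, 1000000000, true);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	u_volt = dev_pm_opp_get_voltage(opp);
 *	dev_pm_opp_put(opp);
 */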

/**
 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
 * @opp: opp for which the supply information has to be returned
 * @supplies: Placeholder for copying the supply information.
 *
 * Return: negative error number on failure, 0 otherwise on success after
 * setting @supplies.
 *
 * This can be used for devices with any number of power supplies. The caller
 * must ensure that the @supplies array contains space for each regulator.
 */
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
			    struct dev_pm_opp_supply *supplies)
{
	if (IS_ERR_OR_NULL(opp) || !supplies) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	memcpy(supplies, opp->supplies,
	       sizeof(*supplies) * opp->opp_table->regulator_count);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
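
/*
 * Example (illustrative only): copying out the supply data. The caller is
 * assumed to know its own regulator count (two here, matching a hypothetical
 * two-supply setup registered earlier with the OPP core):
 *
 *	struct dev_pm_opp_supply supplies[2];
 *	int ret;
 *
 *	ret = dev_pm_opp_get_supplies(opp, supplies);
 *	if (ret)
 *		return ret;
 *
 *	dev_dbg(dev, "first supply target: %lu uV\n", supplies[0].u_volt);
 */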

/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp: opp for which the power has to be returned
 *
 * Return: power in micro watt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
	unsigned long opp_power = 0;
	int i;

	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}
	for (i = 0; i < opp->opp_table->regulator_count; i++)
		opp_power += opp->supplies[i].u_watt;

	return opp_power;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

/**
 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
 *				   available opp with specified index
 * @opp: opp for which the frequency has to be returned
 * @index: index of the frequency within the required opp
 *
 * Return: frequency in hertz corresponding to the opp with specified index,
 * else return 0
 */
unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
	if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rates[index];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp: opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *				      corresponding to an available opp
 * @opp: opp for which the performance state has to be returned
 * @index: index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
					    unsigned int index)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available ||
	    index >= opp->opp_table->required_opp_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp->opp_table))
		return 0;

	/* The required OPP table must belong to a genpd */
	if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return 0;
	}

	return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations to finish high throughput work quickly.
 * Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
	       dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
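
/*
 * Example (illustrative only): a cpufreq-style driver could seed its
 * transition latency from the helper above, e.g. in its ->init() callback.
 * cpu_dev and policy are assumed to come from the surrounding driver:
 *
 *	policy->cpuinfo.transition_latency =
 *		dev_pm_opp_get_max_transition_latency(cpu_dev);
 *	if (!policy->cpuinfo.transition_latency)
 *		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 */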

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * 0 if none, or a negative error value otherwise.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
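
/*
 * Example (illustrative only): enumerating all available OPPs in ascending
 * frequency order by combining the count with a ceil search:
 *
 *	unsigned long freq = 0;
 *	int i, count = dev_pm_opp_get_opp_count(dev);
 *
 *	for (i = 0; i < count; i++, freq++) {
 *		struct dev_pm_opp *opp;
 *
 *		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *		if (IS_ERR(opp))
 *			break;
 *
 *		dev_dbg(dev, "OPP %d: %lu Hz\n", i, freq);
 *		dev_pm_opp_put(opp);
 *	}
 */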

/* Helpers to read keys */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
	return opp->rates[index];
}

static unsigned long _read_level(struct dev_pm_opp *opp, int index)
{
	return opp->level;
}

static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
{
	return opp->bandwidth[index].peak;
}

/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key == key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key)
{
	if (opp_key >= key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

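/*
 * Unlike the exact/ceil helpers above, the floor helper keeps updating *opp
 * with every entry that is still <= key and only signals "stop" once it sees
 * the first entry above the key. As the opp_list is sorted in ascending
 * order, the last value stored in *opp is the floor match.
 */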
static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key > key)
		return true;

	*opp = temp_opp;
	return false;
}

/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
				unsigned long opp_key, unsigned long key),
		bool (*assert)(struct opp_table *opp_table))
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	/* Assert that the requirement is met */
	if (assert && !assert(opp_table))
		return ERR_PTR(-EINVAL);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available) {
			if (compare(&opp, temp_opp, read(temp_opp, index), *key))
				break;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp)) {
		*key = read(opp, index);
		dev_pm_opp_get(opp);
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key),
	  bool (*assert)(struct opp_table *opp_table))
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
			PTR_ERR(opp_table));
		return ERR_CAST(opp_table);
	}

	opp = _opp_table_find_key(opp_table, key, index, available, read,
				  compare, assert);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}

static struct dev_pm_opp *_find_key_exact(struct device *dev,
		unsigned long key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	/*
	 * The value of key will be updated here, but will be ignored as the
	 * caller doesn't need it.
	 */
	return _find_key(dev, &key, index, available, read, _compare_exact,
			 assert);
}

static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	return _opp_table_find_key(opp_table, key, index, available, read,
				   _compare_ceil, assert);
}

static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
		int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	return _find_key(dev, key, index, available, read, _compare_ceil,
			 assert);
}

static struct dev_pm_opp *_find_key_floor(struct device *dev,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	return _find_key(dev, key, index, available, read, _compare_floor,
			 assert);
}

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available == true, the
 * match is for an exact frequency which is available in the stored OPP table.
 * If false, the match is for an exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq, bool available)
{
	return _find_key_exact(dev, freq, 0, available, _read_freq,
			       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
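
/*
 * Example (illustrative only): using the @available modifier to enable an
 * OPP that is currently disabled. The 800 MHz rate is a made-up value:
 *
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, 800000000, false);
 *	if (!IS_ERR(opp)) {
 *		dev_pm_opp_put(opp);
 *		dev_pm_opp_enable(dev, 800000000);
 *	}
 */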

/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: frequency to search for
 * @index: Clock index
 * @available: true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
				   u32 index, bool available)
{
	return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
					assert_single_clk);
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
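
/*
 * Example (illustrative only): snapping a requested rate up to a supported
 * OPP; target_freq is assumed to come from the caller and *freq is refreshed
 * to the matched OPP's frequency:
 *
 *	unsigned long freq = target_freq;
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *	dev_pm_opp_put(opp);
 *
 * after which freq holds the lowest supported rate >= target_freq.
 */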

/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *					 clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
				  u32 index)
{
	return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
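
/*
 * Example (illustrative only): a common idiom with the floor search is to
 * start from ULONG_MAX to retrieve the highest available OPP:
 *
 *	unsigned long freq = ULONG_MAX;
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_freq_floor(dev, &freq);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *	dev_pm_opp_put(opp);
 *
 * after which freq holds the maximum supported frequency.
 */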

/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
				   u32 index)
{
	return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded-up level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for rounded-up match in the opp table and returns pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
	if (IS_ERR(opp))
		return opp;

	/* False match */
	if (temp == OPP_LEVEL_UNSET) {
		dev_err(dev, "%s: OPP levels aren't available\n", __func__);
		dev_pm_opp_put(opp);
		return ERR_PTR(-ENODEV);
	}

	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);

/**
 * dev_pm_opp_find_level_floor() - Search for a rounded floor level
 * @dev: device for which we do this operation
 * @level: Start level
 *
 * Search for the matching floor *available* OPP from a starting level
 * for a device.
 *
 * Return: matching *opp and refreshes *level accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
					       unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL);
	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor);

/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching ceil *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
					   int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
					    unsigned int *bw, int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static int
_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
		       struct dev_pm_opp *opp, void *data, bool scaling_down)
{
	unsigned long *target = data;
	unsigned long freq;
	int ret;

	/* One of target and opp must be available */
	if (target) {
		freq = *target;
	} else if (opp) {
		freq = opp->rates[0];
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	ret = clk_set_rate(opp_table->clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	} else {
		opp_table->current_rate_single_clk = freq;
	}

	return ret;
}

/*
 * Simple implementation for configuring multiple clocks. Configure clocks in
 * the order in which they are present in the array while scaling up, and in
 * the reverse order while scaling down.
 */
int dev_pm_opp_config_clks_simple(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
		bool scaling_down)
{
	int ret, i;

	if (scaling_down) {
		for (i = opp_table->clk_count - 1; i >= 0; i--) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n",
					__func__, ret);
				return ret;
			}
		}
	} else {
		for (i = 0; i < opp_table->clk_count; i++) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n",
					__func__, ret);
				return ret;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
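
/*
 * Example (illustrative only): a driver with two clocks could opt into the
 * helper above through dev_pm_opp_set_config(). The clock names are made up:
 *
 *	static const char * const clk_names[] = { "core", "bus", NULL };
 *	struct dev_pm_opp_config config = {
 *		.clk_names = clk_names,
 *		.config_clks = dev_pm_opp_config_clks_simple,
 *	};
 *	int ret;
 *
 *	ret = dev_pm_opp_set_config(dev, &config);
 *	if (ret < 0)
 *		return ret;
 */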

static int _opp_config_regulator_single(struct device *dev,
		struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
		struct regulator **regulators, unsigned int count)
{
	struct regulator *reg = regulators[0];
	int ret;

	/* This function only supports a single regulator per device */
	if (WARN_ON(count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	ret = _set_opp_voltage(dev, reg, new_opp->supplies);
	if (ret)
		return ret;

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!new_opp->opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d", ret);
	}

	return 0;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (!opp) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				opp ? "set" : "remove", i, ret);
			return ret;
		}
	}

	return 0;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct device **devs = opp_table->required_devs;
	struct dev_pm_opp *required_opp;
	int index, target, delta, ret;

	if (!devs)
		return 0;

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		index = 0;
		target = opp_table->required_opp_count;
		delta = 1;
	} else {
		index = opp_table->required_opp_count - 1;
		target = -1;
		delta = -1;
	}

	while (index != target) {
		if (devs[index]) {
			required_opp = opp ? opp->required_opps[index] : NULL;

			ret = dev_pm_opp_set_opp(devs[index], required_opp);
			if (ret)
				return ret;
		}

		index += delta;
	}

	return 0;
}

static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
			  struct dev_pm_opp *opp)
{
	unsigned int level = 0;
	int ret = 0;

	if (opp) {
		if (opp->level == OPP_LEVEL_UNSET)
			return 0;

		level = opp->level;
	}

	/* Request a new performance state through the device's PM domain. */
	ret = dev_pm_domain_set_performance_state(dev, level);
	if (ret)
		dev_err(dev, "Failed to set performance state %u (%d)\n", level,
			ret);

	return ret;
}

static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
	struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
	unsigned long freq;

	if (!IS_ERR(opp_table->clk)) {
		freq = clk_get_rate(opp_table->clk);
		opp = _find_freq_ceil(opp_table, &freq);
	}

	/*
	 * Unable to find the current OPP? Pick the first from the list since
	 * it is in ascending order, otherwise rest of the code will need to
	 * make special checks to validate current_opp.
	 */
	if (IS_ERR(opp)) {
		mutex_lock(&opp_table->lock);
		opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
		dev_pm_opp_get(opp);
		mutex_unlock(&opp_table->lock);
	}

	opp_table->current_opp = opp;
}

static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_opp_level(dev, opp_table, NULL);
	if (ret)
		goto out;

	ret = _set_required_opps(dev, opp_table, NULL, false);

out:
	opp_table->enabled = false;
	return ret;
}

static int _set_opp(struct device *dev, struct opp_table *opp_table,
		    struct dev_pm_opp *opp, void *clk_data, bool forced)
{
	struct dev_pm_opp *old_opp;
	int scaling_down, ret;

	if (unlikely(!opp))
		return _disable_opp_table(dev, opp_table);

	/* Find the currently set OPP if we don't know already */
	if (unlikely(!opp_table->current_opp))
		_find_current_opp(dev, opp_table);

	old_opp = opp_table->current_opp;

	/* Return early if nothing to do */
	if (!forced && old_opp == opp && opp_table->enabled) {
		dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
		return 0;
	}

	dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
		__func__, old_opp->rates[0], opp->rates[0], old_opp->level,
		opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
		opp->bandwidth ? opp->bandwidth[0].peak : 0);

	scaling_down = _opp_compare_key(opp_table, old_opp, opp);
	if (scaling_down == -1)
		scaling_down = 0;

	/* Scaling up? Configure required OPPs before frequency */
	if (!scaling_down) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp_table, opp);
		if (ret)
			return ret;

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}
	}

	if (opp_table->config_clks) {
		ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
		if (ret)
			return ret;
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (scaling_down) {
		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp_table, opp);
		if (ret)
			return ret;

		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}
	}

	opp_table->enabled = true;
	dev_pm_opp_put(old_opp);

	/* Make sure current_opp doesn't get freed */
	dev_pm_opp_get(opp);
	opp_table->current_opp = opp;

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
 * fmax provided by the OPP should have already rounded target_freq to that
 * OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq = 0, temp_freq;
	struct dev_pm_opp *opp = NULL;
	bool forced = false;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (target_freq) {
		/*
		 * For IO devices which require an OPP on some platforms/SoCs
		 * while just needing to scale the clock on some others
		 * we look for empty OPP tables with just a clock handle and
		 * scale only the clk. This makes dev_pm_opp_set_rate()
		 * equivalent to a clk_set_rate().
		 */
		if (!_get_opp_count(opp_table)) {
			ret = opp_table->config_clks(dev, opp_table, NULL,
						     &target_freq, false);
			goto put_opp_table;
		}

		freq = clk_round_rate(opp_table->clk, target_freq);
		if ((long)freq <= 0)
			freq = target_freq;

		/*
		 * The clock driver may support finer resolution of the
		 * frequencies than the OPP table, don't update the frequency we
		 * pass to clk_set_rate() here.
		 */
		temp_freq = freq;
		opp = _find_freq_ceil(opp_table, &temp_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
				__func__, freq, ret);
			goto put_opp_table;
		}

		/*
		 * An OPP entry specifies the highest frequency at which other
		 * properties of the OPP entry apply. Even if the new OPP is
		 * same as the old one, we may still reach here for a different
		 * value of the frequency. In such a case, do not abort but
		 * configure the hardware to the desired frequency forcefully.
		 */
		forced = opp_table->current_rate_single_clk != freq;
	}

	ret = _set_opp(dev, opp_table, opp, &freq, forced);

	if (freq)
		dev_pm_opp_put(opp);

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
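
/*
 * Example (illustrative only): dev_pm_opp_set_rate() is typically the whole
 * body of a devfreq/cpufreq ->target() style callback, e.g.:
 *
 *	static int foo_devfreq_target(struct device *dev,
 *				      unsigned long *freq, u32 flags)
 *	{
 *		return dev_pm_opp_set_rate(dev, *freq);
 *	}
 *
 * where foo_devfreq_target() is a hypothetical driver callback.
 */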

/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	ret = _set_opp(dev, opp_table, opp, NULL, false);
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
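
/*
 * Example (illustrative only): unlike dev_pm_opp_set_rate(), the caller looks
 * the OPP up first, which allows selecting by keys other than frequency.
 * target_level is assumed to come from the caller:
 *
 *	unsigned int level = target_level;
 *	struct dev_pm_opp *opp;
 *	int ret;
 *
 *	opp = dev_pm_opp_find_level_ceil(dev, &level);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	ret = dev_pm_opp_set_opp(dev, opp);
 *	dev_pm_opp_put(opp);
 */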

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	mutex_lock(&opp_table->lock);
	list_add(&opp_dev->node, &opp_table->dev_list);
	mutex_unlock(&opp_table->lock);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	INIT_LIST_HEAD(&opp_table->dev_list);
	INIT_LIST_HEAD(&opp_table->lazy);

	opp_table->clk = ERR_PTR(-ENODEV);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

remove_opp_dev:
	_of_clear_opp_table(opp_table);
	_remove_opp_dev(opp_dev, opp_table);
	mutex_destroy(&opp_table->lock);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

static struct opp_table *_update_opp_table_clk(struct device *dev,
					       struct opp_table *opp_table,
					       bool getclk)
{
	int ret;

	/*
	 * Return early if we don't need to get clk or we have already done it
	 * earlier.
	 */
	if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
	    opp_table->clks)
		return opp_table;

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

	ret = PTR_ERR_OR_ZERO(opp_table->clk);
	if (!ret) {
		opp_table->config_clks = _opp_config_clk_single;
		opp_table->clk_count = 1;
		return opp_table;
	}

	if (ret == -ENOENT) {
		/*
		 * There are a few platforms which don't want the OPP core to
		 * manage the device's clock settings. In such cases neither
		 * the platform provides the clks explicitly to us, nor the DT
		 * contains a valid clk entry. The OPP nodes in DT may still
		 * contain "opp-hz" property though, which we need to parse and
		 * allow the platform to find an OPP based on freq later on.
		 *
		 * This is a simple solution to take care of such corner cases,
		 * i.e. make the clk_count 1, which lets us allocate space for
		 * frequency in opp->rates and also parse the entries in DT.
		 */
		opp_table->clk_count = 1;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
		return opp_table;
	}

	dev_pm_opp_put_opp_table(opp_table);
	dev_err_probe(dev, ret, "Couldn't find clock\n");

	return ERR_PTR(ret);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added
 * twice, if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to the same. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
 *
 * And for that reason we have to go for a bit tricky implementation here,
 * which uses the opp_tables_busy flag to indicate if another creator is in the
 * middle of adding an OPP table and others should wait for it to finish.
 */
struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
					 bool getclk)
{
	struct opp_table *opp_table;

again:
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	/*
	 * The opp_tables list or an OPP table's dev_list is getting updated by
	 * another user, wait for it to finish.
	 */
	if (unlikely(opp_tables_busy)) {
		mutex_unlock(&opp_table_lock);
		cpu_relax();
		goto again;
	}

	opp_tables_busy = true;
	opp_table = _managed_opp(dev, index);

	/* Drop the lock to reduce the size of critical section */
	mutex_unlock(&opp_table_lock);

	if (opp_table) {
		if (!_add_opp_dev(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = ERR_PTR(-ENOMEM);
		}

		mutex_lock(&opp_table_lock);
	} else {
		opp_table = _allocate_opp_table(dev, index);

		mutex_lock(&opp_table_lock);
		if (!IS_ERR(opp_table))
			list_add(&opp_table->node, &opp_tables);
	}

	opp_tables_busy = false;

unlock:
	mutex_unlock(&opp_table_lock);

	return _update_opp_table_clk(dev, opp_table, getclk);
}

static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
	return _add_opp_table_indexed(dev, 0, getclk);
}
1608 | |
1609 | struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) |
1610 | { |
1611 | return _find_opp_table(dev); |
1612 | } |
1613 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table); |
1614 | |
1615 | static void _opp_table_kref_release(struct kref *kref) |
1616 | { |
1617 | struct opp_table *opp_table = container_of(kref, struct opp_table, kref); |
1618 | struct opp_device *opp_dev, *temp; |
1619 | int i; |
1620 | |
1621 | /* Drop the lock as soon as we can */ |
1622 | list_del(entry: &opp_table->node); |
1623 | mutex_unlock(lock: &opp_table_lock); |
1624 | |
1625 | if (opp_table->current_opp) |
		dev_pm_opp_put(opp_table->current_opp);
1627 | |
1628 | _of_clear_opp_table(opp_table); |
1629 | |
1630 | /* Release automatically acquired single clk */ |
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);
1633 | |
1634 | if (opp_table->paths) { |
1635 | for (i = 0; i < opp_table->path_count; i++) |
			icc_put(opp_table->paths[i]);
		kfree(opp_table->paths);
1638 | } |
1639 | |
1640 | WARN_ON(!list_empty(&opp_table->opp_list)); |
1641 | |
1642 | list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) |
1643 | _remove_opp_dev(opp_dev, opp_table); |
1644 | |
	mutex_destroy(&opp_table->lock);
	kfree(opp_table);
1647 | } |
1648 | |
1649 | void dev_pm_opp_put_opp_table(struct opp_table *opp_table) |
1650 | { |
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
1653 | } |
1654 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table); |
1655 | |
1656 | void _opp_free(struct dev_pm_opp *opp) |
1657 | { |
	kfree(opp);
1659 | } |
1660 | |
1661 | static void _opp_kref_release(struct kref *kref) |
1662 | { |
1663 | struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); |
1664 | struct opp_table *opp_table = opp->opp_table; |
1665 | |
	list_del(&opp->node);
	mutex_unlock(&opp_table->lock);
1668 | |
1669 | /* |
1670 | * Notify the changes in the availability of the operable |
1671 | * frequency/voltage list. |
1672 | */ |
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
1674 | _of_clear_opp(opp_table, opp); |
1675 | opp_debug_remove_one(opp); |
	kfree(opp);
1677 | } |
1678 | |
1679 | void dev_pm_opp_get(struct dev_pm_opp *opp) |
1680 | { |
	kref_get(&opp->kref);
1682 | } |
1683 | |
1684 | void dev_pm_opp_put(struct dev_pm_opp *opp) |
1685 | { |
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
1687 | } |
1688 | EXPORT_SYMBOL_GPL(dev_pm_opp_put); |
1689 | |
1690 | /** |
1691 | * dev_pm_opp_remove() - Remove an OPP from OPP table |
1692 | * @dev: device for which we do this operation |
1693 | * @freq: OPP to remove with matching 'freq' |
1694 | * |
1695 | * This function removes an opp from the opp table. |
1696 | */ |
1697 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) |
1698 | { |
1699 | struct dev_pm_opp *opp = NULL, *iter; |
1700 | struct opp_table *opp_table; |
1701 | |
1702 | opp_table = _find_opp_table(dev); |
	if (IS_ERR(opp_table))
1704 | return; |
1705 | |
1706 | if (!assert_single_clk(opp_table)) |
1707 | goto put_table; |
1708 | |
1709 | mutex_lock(&opp_table->lock); |
1710 | |
1711 | list_for_each_entry(iter, &opp_table->opp_list, node) { |
1712 | if (iter->rates[0] == freq) { |
1713 | opp = iter; |
1714 | break; |
1715 | } |
1716 | } |
1717 | |
	mutex_unlock(&opp_table->lock);
1719 | |
1720 | if (opp) { |
1721 | dev_pm_opp_put(opp); |
1722 | |
1723 | /* Drop the reference taken by dev_pm_opp_add() */ |
1724 | dev_pm_opp_put_opp_table(opp_table); |
1725 | } else { |
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
1727 | __func__, freq); |
1728 | } |
1729 | |
1730 | put_table: |
1731 | /* Drop the reference taken by _find_opp_table() */ |
1732 | dev_pm_opp_put_opp_table(opp_table); |
1733 | } |
1734 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); |
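
/*
 * Example usage (illustrative sketch; the device pointer and the 800 MHz
 * rate below are hypothetical): a driver that previously added an OPP can
 * drop it again by its frequency.
 *
 *	dev_pm_opp_remove(dev, 800000000);
 */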
1735 | |
1736 | static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, |
1737 | bool dynamic) |
1738 | { |
1739 | struct dev_pm_opp *opp = NULL, *temp; |
1740 | |
1741 | mutex_lock(&opp_table->lock); |
1742 | list_for_each_entry(temp, &opp_table->opp_list, node) { |
1743 | /* |
1744 | * Refcount must be dropped only once for each OPP by OPP core, |
1745 | * do that with help of "removed" flag. |
1746 | */ |
1747 | if (!temp->removed && dynamic == temp->dynamic) { |
1748 | opp = temp; |
1749 | break; |
1750 | } |
1751 | } |
1752 | |
	mutex_unlock(&opp_table->lock);
1754 | return opp; |
1755 | } |
1756 | |
1757 | /* |
1758 | * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to |
1759 | * happen lock less to avoid circular dependency issues. This routine must be |
1760 | * called without the opp_table->lock held. |
1761 | */ |
1762 | static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) |
1763 | { |
1764 | struct dev_pm_opp *opp; |
1765 | |
1766 | while ((opp = _opp_get_next(opp_table, dynamic))) { |
1767 | opp->removed = true; |
1768 | dev_pm_opp_put(opp); |
1769 | |
1770 | /* Drop the references taken by dev_pm_opp_add() */ |
1771 | if (dynamic) |
1772 | dev_pm_opp_put_opp_table(opp_table); |
1773 | } |
1774 | } |
1775 | |
1776 | bool _opp_remove_all_static(struct opp_table *opp_table) |
1777 | { |
1778 | mutex_lock(&opp_table->lock); |
1779 | |
	if (!opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return false;
	}

	if (--opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return true;
	}

	mutex_unlock(&opp_table->lock);

	_opp_remove_all(opp_table, false);
1793 | return true; |
1794 | } |
1795 | |
1796 | /** |
1797 | * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs |
1798 | * @dev: device for which we do this operation |
1799 | * |
1800 | * This function removes all dynamically created OPPs from the opp table. |
1801 | */ |
1802 | void dev_pm_opp_remove_all_dynamic(struct device *dev) |
1803 | { |
1804 | struct opp_table *opp_table; |
1805 | |
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	_opp_remove_all(opp_table, true);
1811 | |
1812 | /* Drop the reference taken by _find_opp_table() */ |
1813 | dev_pm_opp_put_opp_table(opp_table); |
1814 | } |
1815 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); |
1816 | |
1817 | struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table) |
1818 | { |
1819 | struct dev_pm_opp *opp; |
1820 | int supply_count, supply_size, icc_size, clk_size; |
1821 | |
1822 | /* Allocate space for at least one supply */ |
1823 | supply_count = opp_table->regulator_count > 0 ? |
1824 | opp_table->regulator_count : 1; |
1825 | supply_size = sizeof(*opp->supplies) * supply_count; |
1826 | clk_size = sizeof(*opp->rates) * opp_table->clk_count; |
1827 | icc_size = sizeof(*opp->bandwidth) * opp_table->path_count; |
1828 | |
1829 | /* allocate new OPP node and supplies structures */ |
	opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
1831 | if (!opp) |
1832 | return NULL; |
1833 | |
1834 | /* Put the supplies, bw and clock at the end of the OPP structure */ |
1835 | opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); |
1836 | |
1837 | opp->rates = (unsigned long *)(opp->supplies + supply_count); |
1838 | |
1839 | if (icc_size) |
1840 | opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count); |
1841 | |
	INIT_LIST_HEAD(&opp->node);
1843 | |
1844 | opp->level = OPP_LEVEL_UNSET; |
1845 | |
1846 | return opp; |
1847 | } |
1848 | |
1849 | static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, |
1850 | struct opp_table *opp_table) |
1851 | { |
1852 | struct regulator *reg; |
1853 | int i; |
1854 | |
1855 | if (!opp_table->regulators) |
1856 | return true; |
1857 | |
1858 | for (i = 0; i < opp_table->regulator_count; i++) { |
1859 | reg = opp_table->regulators[i]; |
1860 | |
		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
1867 | return false; |
1868 | } |
1869 | } |
1870 | |
1871 | return true; |
1872 | } |
1873 | |
1874 | static int _opp_compare_rate(struct opp_table *opp_table, |
1875 | struct dev_pm_opp *opp1, struct dev_pm_opp *opp2) |
1876 | { |
1877 | int i; |
1878 | |
1879 | for (i = 0; i < opp_table->clk_count; i++) { |
1880 | if (opp1->rates[i] != opp2->rates[i]) |
1881 | return opp1->rates[i] < opp2->rates[i] ? -1 : 1; |
1882 | } |
1883 | |
1884 | /* Same rates for both OPPs */ |
1885 | return 0; |
1886 | } |
1887 | |
1888 | static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1, |
1889 | struct dev_pm_opp *opp2) |
1890 | { |
1891 | int i; |
1892 | |
1893 | for (i = 0; i < opp_table->path_count; i++) { |
1894 | if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak) |
1895 | return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1; |
1896 | } |
1897 | |
1898 | /* Same bw for both OPPs */ |
1899 | return 0; |
1900 | } |
1901 | |
1902 | /* |
1903 | * Returns |
1904 | * 0: opp1 == opp2 |
1905 | * 1: opp1 > opp2 |
1906 | * -1: opp1 < opp2 |
1907 | */ |
1908 | int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, |
1909 | struct dev_pm_opp *opp2) |
1910 | { |
1911 | int ret; |
1912 | |
1913 | ret = _opp_compare_rate(opp_table, opp1, opp2); |
1914 | if (ret) |
1915 | return ret; |
1916 | |
1917 | ret = _opp_compare_bw(opp_table, opp1, opp2); |
1918 | if (ret) |
1919 | return ret; |
1920 | |
1921 | if (opp1->level != opp2->level) |
1922 | return opp1->level < opp2->level ? -1 : 1; |
1923 | |
1924 | /* Duplicate OPPs */ |
1925 | return 0; |
1926 | } |
1927 | |
1928 | static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, |
1929 | struct opp_table *opp_table, |
1930 | struct list_head **head) |
1931 | { |
1932 | struct dev_pm_opp *opp; |
1933 | int opp_cmp; |
1934 | |
1935 | /* |
1936 | * Insert new OPP in order of increasing frequency and discard if |
1937 | * already present. |
1938 | * |
1939 | * Need to use &opp_table->opp_list in the condition part of the 'for' |
1940 | * loop, don't replace it with head otherwise it will become an infinite |
1941 | * loop. |
1942 | */ |
1943 | list_for_each_entry(opp, &opp_table->opp_list, node) { |
		opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
1945 | if (opp_cmp > 0) { |
1946 | *head = &opp->node; |
1947 | continue; |
1948 | } |
1949 | |
1950 | if (opp_cmp < 0) |
1951 | return 0; |
1952 | |
1953 | /* Duplicate OPPs */ |
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1955 | __func__, opp->rates[0], opp->supplies[0].u_volt, |
1956 | opp->available, new_opp->rates[0], |
1957 | new_opp->supplies[0].u_volt, new_opp->available); |
1958 | |
		/* Should we compare voltages for all regulators here? */
1960 | return opp->available && |
1961 | new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST; |
1962 | } |
1963 | |
1964 | return 0; |
1965 | } |
1966 | |
1967 | void _required_opps_available(struct dev_pm_opp *opp, int count) |
1968 | { |
1969 | int i; |
1970 | |
1971 | for (i = 0; i < count; i++) { |
1972 | if (opp->required_opps[i]->available) |
1973 | continue; |
1974 | |
1975 | opp->available = false; |
		pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
1977 | __func__, opp->required_opps[i]->np, opp->rates[0]); |
1978 | return; |
1979 | } |
1980 | } |
1981 | |
1982 | /* |
1983 | * Returns: |
 * 0: On success. An appropriate error message is printed for duplicate OPPs.
1985 | * -EBUSY: For OPP with same freq/volt and is available. The callers of |
1986 | * _opp_add() must return 0 if they receive -EBUSY from it. This is to make |
1987 | * sure we don't print error messages unnecessarily if different parts of |
1988 | * kernel try to initialize the OPP table. |
1989 | * -EEXIST: For OPP with same freq but different volt or is unavailable. This |
1990 | * should be considered an error by the callers of _opp_add(). |
1991 | */ |
1992 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, |
1993 | struct opp_table *opp_table) |
1994 | { |
1995 | struct list_head *head; |
1996 | int ret; |
1997 | |
1998 | mutex_lock(&opp_table->lock); |
1999 | head = &opp_table->opp_list; |
2000 | |
	ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
	if (ret) {
		mutex_unlock(&opp_table->lock);
		return ret;
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);
2009 | |
2010 | new_opp->opp_table = opp_table; |
	kref_init(&new_opp->kref);

	opp_debug_create_one(new_opp, opp_table);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
2018 | __func__, new_opp->rates[0]); |
2019 | } |
2020 | |
2021 | /* required-opps not fully initialized yet */ |
2022 | if (lazy_linking_pending(opp_table)) |
2023 | return 0; |
2024 | |
	_required_opps_available(new_opp, opp_table->required_opp_count);
2026 | |
2027 | return 0; |
2028 | } |
2029 | |
2030 | /** |
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
2032 | * @opp_table: OPP table |
2033 | * @dev: device for which we do this operation |
2034 | * @data: The OPP data for the OPP to add |
2035 | * @dynamic: Dynamically added OPPs. |
2036 | * |
2037 | * This function adds an opp definition to the opp table and returns status. |
2038 | * The opp is made available by default and it can be controlled using |
2039 | * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. |
2040 | * |
2041 | * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table |
2042 | * and freed by dev_pm_opp_of_remove_table. |
2043 | * |
2044 | * Return: |
2045 | * 0 On success OR |
2046 | * Duplicate OPPs (both freq and volt are same) and opp->available |
2047 | * -EEXIST Freq are same and volt are different OR |
2048 | * Duplicate OPPs (both freq and volt are same) and !opp->available |
2049 | * -ENOMEM Memory allocation failure |
2050 | */ |
2051 | int _opp_add_v1(struct opp_table *opp_table, struct device *dev, |
2052 | struct dev_pm_opp_data *data, bool dynamic) |
2053 | { |
2054 | struct dev_pm_opp *new_opp; |
2055 | unsigned long tol, u_volt = data->u_volt; |
2056 | int ret; |
2057 | |
2058 | if (!assert_single_clk(opp_table)) |
2059 | return -EINVAL; |
2060 | |
2061 | new_opp = _opp_allocate(opp_table); |
2062 | if (!new_opp) |
2063 | return -ENOMEM; |
2064 | |
2065 | /* populate the opp table */ |
2066 | new_opp->rates[0] = data->freq; |
2067 | new_opp->level = data->level; |
2068 | new_opp->turbo = data->turbo; |
2069 | tol = u_volt * opp_table->voltage_tolerance_v1 / 100; |
2070 | new_opp->supplies[0].u_volt = u_volt; |
2071 | new_opp->supplies[0].u_volt_min = u_volt - tol; |
2072 | new_opp->supplies[0].u_volt_max = u_volt + tol; |
2073 | new_opp->available = true; |
2074 | new_opp->dynamic = dynamic; |
2075 | |
2076 | ret = _opp_add(dev, new_opp, opp_table); |
2077 | if (ret) { |
2078 | /* Don't return error for duplicate OPPs */ |
2079 | if (ret == -EBUSY) |
2080 | ret = 0; |
2081 | goto free_opp; |
2082 | } |
2083 | |
2084 | /* |
2085 | * Notify the changes in the availability of the operable |
2086 | * frequency/voltage list. |
2087 | */ |
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
2089 | return 0; |
2090 | |
2091 | free_opp: |
	_opp_free(new_opp);
2093 | |
2094 | return ret; |
2095 | } |
2096 | |
2097 | /* |
2098 | * This is required only for the V2 bindings, and it enables a platform to |
2099 | * specify the hierarchy of versions it supports. OPP layer will then enable |
2100 | * OPPs, which are available for those versions, based on its 'opp-supported-hw' |
2101 | * property. |
2102 | */ |
2103 | static int _opp_set_supported_hw(struct opp_table *opp_table, |
2104 | const u32 *versions, unsigned int count) |
2105 | { |
	/* Another CPU that shares the OPP table has set the property? */
2107 | if (opp_table->supported_hw) |
2108 | return 0; |
2109 | |
	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
2112 | if (!opp_table->supported_hw) |
2113 | return -ENOMEM; |
2114 | |
2115 | opp_table->supported_hw_count = count; |
2116 | |
2117 | return 0; |
2118 | } |
2119 | |
2120 | static void _opp_put_supported_hw(struct opp_table *opp_table) |
2121 | { |
2122 | if (opp_table->supported_hw) { |
		kfree(opp_table->supported_hw);
2124 | opp_table->supported_hw = NULL; |
2125 | opp_table->supported_hw_count = 0; |
2126 | } |
2127 | } |
2128 | |
2129 | /* |
2130 | * This is required only for the V2 bindings, and it enables a platform to |
 * specify the extension to be used for certain property names. The properties to
2132 | * which the extension will apply are opp-microvolt and opp-microamp. OPP core |
2133 | * should postfix the property name with -<name> while looking for them. |
2134 | */ |
2135 | static int _opp_set_prop_name(struct opp_table *opp_table, const char *name) |
2136 | { |
	/* Another CPU that shares the OPP table has set the property? */
2138 | if (!opp_table->prop_name) { |
		opp_table->prop_name = kstrdup(name, GFP_KERNEL);
2140 | if (!opp_table->prop_name) |
2141 | return -ENOMEM; |
2142 | } |
2143 | |
2144 | return 0; |
2145 | } |
2146 | |
2147 | static void _opp_put_prop_name(struct opp_table *opp_table) |
2148 | { |
2149 | if (opp_table->prop_name) { |
		kfree(opp_table->prop_name);
2151 | opp_table->prop_name = NULL; |
2152 | } |
2153 | } |
2154 | |
2155 | /* |
2156 | * In order to support OPP switching, OPP layer needs to know the name of the |
2157 | * device's regulators, as the core would be required to switch voltages as |
2158 | * well. |
2159 | * |
2160 | * This must be called before any OPPs are initialized for the device. |
2161 | */ |
2162 | static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev, |
2163 | const char * const names[]) |
2164 | { |
2165 | const char * const *temp = names; |
2166 | struct regulator *reg; |
2167 | int count = 0, ret, i; |
2168 | |
2169 | /* Count number of regulators */ |
2170 | while (*temp++) |
2171 | count++; |
2172 | |
2173 | if (!count) |
2174 | return -EINVAL; |
2175 | |
	/* Another CPU that shares the OPP table has set the regulators? */
2177 | if (opp_table->regulators) |
2178 | return 0; |
2179 | |
	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
2183 | if (!opp_table->regulators) |
2184 | return -ENOMEM; |
2185 | |
2186 | for (i = 0; i < count; i++) { |
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = dev_err_probe(dev, PTR_ERR(reg),
					    "%s: no regulator (%s) found\n",
					    __func__, names[i]);
			goto free_regulators;
		}
2194 | |
2195 | opp_table->regulators[i] = reg; |
2196 | } |
2197 | |
2198 | opp_table->regulator_count = count; |
2199 | |
2200 | /* Set generic config_regulators() for single regulators here */ |
2201 | if (count == 1) |
2202 | opp_table->config_regulators = _opp_config_regulator_single; |
2203 | |
2204 | return 0; |
2205 | |
2206 | free_regulators: |
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;
2213 | |
2214 | return ret; |
2215 | } |
2216 | |
2217 | static void _opp_put_regulators(struct opp_table *opp_table) |
2218 | { |
2219 | int i; |
2220 | |
2221 | if (!opp_table->regulators) |
2222 | return; |
2223 | |
2224 | if (opp_table->enabled) { |
2225 | for (i = opp_table->regulator_count - 1; i >= 0; i--) |
			regulator_disable(opp_table->regulators[i]);
2227 | } |
2228 | |
2229 | for (i = opp_table->regulator_count - 1; i >= 0; i--) |
		regulator_put(opp_table->regulators[i]);

	kfree(opp_table->regulators);
2233 | opp_table->regulators = NULL; |
2234 | opp_table->regulator_count = -1; |
2235 | } |
2236 | |
2237 | static void _put_clks(struct opp_table *opp_table, int count) |
2238 | { |
2239 | int i; |
2240 | |
2241 | for (i = count - 1; i >= 0; i--) |
		clk_put(opp_table->clks[i]);

	kfree(opp_table->clks);
2245 | opp_table->clks = NULL; |
2246 | } |
2247 | |
2248 | /* |
2249 | * In order to support OPP switching, OPP layer needs to get pointers to the |
2250 | * clocks for the device. Simple cases work fine without using this routine |
2251 | * (i.e. by passing connection-id as NULL), but for a device with multiple |
2252 | * clocks available, the OPP core needs to know the exact names of the clks to |
2253 | * use. |
2254 | * |
2255 | * This must be called before any OPPs are initialized for the device. |
2256 | */ |
2257 | static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev, |
2258 | const char * const names[], |
2259 | config_clks_t config_clks) |
2260 | { |
2261 | const char * const *temp = names; |
2262 | int count = 0, ret, i; |
2263 | struct clk *clk; |
2264 | |
2265 | /* Count number of clks */ |
2266 | while (*temp++) |
2267 | count++; |
2268 | |
2269 | /* |
2270 | * This is a special case where we have a single clock, whose connection |
2271 | * id name is NULL, i.e. first two entries are NULL in the array. |
2272 | */ |
2273 | if (!count && !names[1]) |
2274 | count = 1; |
2275 | |
2276 | /* Fail early for invalid configurations */ |
2277 | if (!count || (!config_clks && count > 1)) |
2278 | return -EINVAL; |
2279 | |
	/* Another CPU that shares the OPP table has set the clkname? */
2281 | if (opp_table->clks) |
2282 | return 0; |
2283 | |
	opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
					GFP_KERNEL);
2286 | if (!opp_table->clks) |
2287 | return -ENOMEM; |
2288 | |
2289 | /* Find clks for the device */ |
2290 | for (i = 0; i < count; i++) { |
		clk = clk_get(dev, names[i]);
		if (IS_ERR(clk)) {
			ret = dev_err_probe(dev, PTR_ERR(clk),
					    "%s: Couldn't find clock with name: %s\n",
					    __func__, names[i]);
2296 | goto free_clks; |
2297 | } |
2298 | |
2299 | opp_table->clks[i] = clk; |
2300 | } |
2301 | |
2302 | opp_table->clk_count = count; |
2303 | opp_table->config_clks = config_clks; |
2304 | |
2305 | /* Set generic single clk set here */ |
2306 | if (count == 1) { |
2307 | if (!opp_table->config_clks) |
2308 | opp_table->config_clks = _opp_config_clk_single; |
2309 | |
2310 | /* |
2311 | * We could have just dropped the "clk" field and used "clks" |
2312 | * everywhere. Instead we kept the "clk" field around for |
2313 | * following reasons: |
2314 | * |
2315 | * - avoiding clks[0] everywhere else. |
2316 | * - not running single clk helpers for multiple clk usecase by |
2317 | * mistake. |
2318 | * |
2319 | * Since this is single-clk case, just update the clk pointer |
2320 | * too. |
2321 | */ |
2322 | opp_table->clk = opp_table->clks[0]; |
2323 | } |
2324 | |
2325 | return 0; |
2326 | |
2327 | free_clks: |
	_put_clks(opp_table, i);
2329 | return ret; |
2330 | } |
2331 | |
2332 | static void _opp_put_clknames(struct opp_table *opp_table) |
2333 | { |
2334 | if (!opp_table->clks) |
2335 | return; |
2336 | |
2337 | opp_table->config_clks = NULL; |
	opp_table->clk = ERR_PTR(-ENODEV);

	_put_clks(opp_table, opp_table->clk_count);
2341 | } |
2342 | |
2343 | /* |
2344 | * This is useful to support platforms with multiple regulators per device. |
2345 | * |
2346 | * This must be called before any OPPs are initialized for the device. |
2347 | */ |
2348 | static int _opp_set_config_regulators_helper(struct opp_table *opp_table, |
2349 | struct device *dev, config_regulators_t config_regulators) |
2350 | { |
	/* Another CPU that shares the OPP table has set the helper? */
2352 | if (!opp_table->config_regulators) |
2353 | opp_table->config_regulators = config_regulators; |
2354 | |
2355 | return 0; |
2356 | } |
2357 | |
2358 | static void _opp_put_config_regulators_helper(struct opp_table *opp_table) |
2359 | { |
2360 | if (opp_table->config_regulators) |
2361 | opp_table->config_regulators = NULL; |
2362 | } |
2363 | |
2364 | static void _opp_detach_genpd(struct opp_table *opp_table) |
2365 | { |
2366 | int index; |
2367 | |
2368 | for (index = 0; index < opp_table->required_opp_count; index++) { |
2369 | if (!opp_table->required_devs[index]) |
2370 | continue; |
2371 | |
		dev_pm_domain_detach(opp_table->required_devs[index], false);
2373 | opp_table->required_devs[index] = NULL; |
2374 | } |
2375 | } |
2376 | |
2377 | /* |
2378 | * Multiple generic power domains for a device are supported with the help of |
2379 | * virtual genpd devices, which are created for each consumer device - genpd |
2380 | * pair. These are the device structures which are attached to the power domain |
2381 | * and are required by the OPP core to set the performance state of the genpd. |
 * The same API also works for the case where a single genpd is available and so
2383 | * we don't need to support that separately. |
2384 | * |
2385 | * This helper will normally be called by the consumer driver of the device |
2386 | * "dev", as only that has details of the genpd names. |
2387 | * |
 * This helper needs to be called once with a list of all genpds to attach.
2389 | * Otherwise the original device structure will be used instead by the OPP core. |
2390 | * |
2391 | * The order of entries in the names array must match the order in which |
2392 | * "required-opps" are added in DT. |
2393 | */ |
2394 | static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, |
2395 | const char * const *names, struct device ***virt_devs) |
2396 | { |
2397 | struct device *virt_dev; |
2398 | int index = 0, ret = -EINVAL; |
2399 | const char * const *name = names; |
2400 | |
2401 | if (!opp_table->required_devs) { |
2402 | dev_err(dev, "Required OPPs not available, can't attach genpd\n" ); |
2403 | return -EINVAL; |
2404 | } |
2405 | |
2406 | /* Genpd core takes care of propagation to parent genpd */ |
2407 | if (opp_table->is_genpd) { |
2408 | dev_err(dev, "%s: Operation not supported for genpds\n" , __func__); |
2409 | return -EOPNOTSUPP; |
2410 | } |
2411 | |
	/* Checking only the first one is enough? */
2413 | if (opp_table->required_devs[0]) |
2414 | return 0; |
2415 | |
2416 | while (*name) { |
2417 | if (index >= opp_table->required_opp_count) { |
2418 | dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n" , |
2419 | *name, opp_table->required_opp_count, index); |
2420 | goto err; |
2421 | } |
2422 | |
		virt_dev = dev_pm_domain_attach_by_name(dev, *name);
		if (IS_ERR_OR_NULL(virt_dev)) {
			ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV;
			dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
2427 | goto err; |
2428 | } |
2429 | |
2430 | /* |
2431 | * Add the virtual genpd device as a user of the OPP table, so |
2432 | * we can call dev_pm_opp_set_opp() on it directly. |
2433 | * |
2434 | * This will be automatically removed when the OPP table is |
2435 | * removed, don't need to handle that here. |
2436 | */ |
		if (!_add_opp_dev(virt_dev, opp_table->required_opp_tables[index])) {
2438 | ret = -ENOMEM; |
2439 | goto err; |
2440 | } |
2441 | |
2442 | opp_table->required_devs[index] = virt_dev; |
2443 | index++; |
2444 | name++; |
2445 | } |
2446 | |
2447 | if (virt_devs) |
2448 | *virt_devs = opp_table->required_devs; |
2449 | |
2450 | return 0; |
2451 | |
2452 | err: |
2453 | _opp_detach_genpd(opp_table); |
	return ret;
}
2457 | |
2458 | static int _opp_set_required_devs(struct opp_table *opp_table, |
2459 | struct device *dev, |
2460 | struct device **required_devs) |
2461 | { |
2462 | int i; |
2463 | |
2464 | if (!opp_table->required_devs) { |
2465 | dev_err(dev, "Required OPPs not available, can't set required devs\n" ); |
2466 | return -EINVAL; |
2467 | } |
2468 | |
	/* Another device that shares the OPP table has set the required devs? */
2470 | if (opp_table->required_devs[0]) |
2471 | return 0; |
2472 | |
2473 | for (i = 0; i < opp_table->required_opp_count; i++) { |
2474 | /* Genpd core takes care of propagation to parent genpd */ |
2475 | if (required_devs[i] && opp_table->is_genpd && |
2476 | opp_table->required_opp_tables[i]->is_genpd) { |
2477 | dev_err(dev, "%s: Operation not supported for genpds\n" , __func__); |
2478 | return -EOPNOTSUPP; |
2479 | } |
2480 | |
2481 | opp_table->required_devs[i] = required_devs[i]; |
2482 | } |
2483 | |
2484 | return 0; |
2485 | } |
2486 | |
2487 | static void _opp_put_required_devs(struct opp_table *opp_table) |
2488 | { |
2489 | int i; |
2490 | |
2491 | for (i = 0; i < opp_table->required_opp_count; i++) |
2492 | opp_table->required_devs[i] = NULL; |
2493 | } |
2494 | |
2495 | static void _opp_clear_config(struct opp_config_data *data) |
2496 | { |
	if (data->flags & OPP_CONFIG_REQUIRED_DEVS)
		_opp_put_required_devs(data->opp_table);
	else if (data->flags & OPP_CONFIG_GENPD)
		_opp_detach_genpd(data->opp_table);

	if (data->flags & OPP_CONFIG_REGULATOR)
		_opp_put_regulators(data->opp_table);
	if (data->flags & OPP_CONFIG_SUPPORTED_HW)
		_opp_put_supported_hw(data->opp_table);
	if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
		_opp_put_config_regulators_helper(data->opp_table);
	if (data->flags & OPP_CONFIG_PROP_NAME)
		_opp_put_prop_name(data->opp_table);
	if (data->flags & OPP_CONFIG_CLK)
		_opp_put_clknames(data->opp_table);

	dev_pm_opp_put_opp_table(data->opp_table);
	kfree(data);
2515 | } |
2516 | |
2517 | /** |
2518 | * dev_pm_opp_set_config() - Set OPP configuration for the device. |
2519 | * @dev: Device for which configuration is being set. |
2520 | * @config: OPP configuration. |
2521 | * |
2522 | * This allows all device OPP configurations to be performed at once. |
2523 | * |
2524 | * This must be called before any OPPs are initialized for the device. This may |
2525 | * be called multiple times for the same OPP table, for example once for each |
 * CPU that shares the same table. This must be balanced by the same number of
2527 | * calls to dev_pm_opp_clear_config() in order to free the OPP table properly. |
2528 | * |
2529 | * This returns a token to the caller, which must be passed to |
2530 | * dev_pm_opp_clear_config() to free the resources later. The value of the |
2531 | * returned token will be >= 1 for success and negative for errors. The minimum |
2532 | * value of 1 is chosen here to make it easy for callers to manage the resource. |
2533 | */ |
2534 | int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) |
2535 | { |
2536 | struct opp_table *opp_table; |
2537 | struct opp_config_data *data; |
2538 | unsigned int id; |
2539 | int ret; |
2540 | |
	data = kmalloc(sizeof(*data), GFP_KERNEL);
2542 | if (!data) |
2543 | return -ENOMEM; |
2544 | |
	opp_table = _add_opp_table(dev, false);
	if (IS_ERR(opp_table)) {
		kfree(data);
		return PTR_ERR(opp_table);
2549 | } |
2550 | |
2551 | data->opp_table = opp_table; |
2552 | data->flags = 0; |
2553 | |
2554 | /* This should be called before OPPs are initialized */ |
2555 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { |
2556 | ret = -EBUSY; |
2557 | goto err; |
2558 | } |
2559 | |
2560 | /* Configure clocks */ |
2561 | if (config->clk_names) { |
		ret = _opp_set_clknames(opp_table, dev, config->clk_names,
					config->config_clks);
2564 | if (ret) |
2565 | goto err; |
2566 | |
2567 | data->flags |= OPP_CONFIG_CLK; |
2568 | } else if (config->config_clks) { |
2569 | /* Don't allow config callback without clocks */ |
2570 | ret = -EINVAL; |
2571 | goto err; |
2572 | } |
2573 | |
2574 | /* Configure property names */ |
2575 | if (config->prop_name) { |
		ret = _opp_set_prop_name(opp_table, config->prop_name);
2577 | if (ret) |
2578 | goto err; |
2579 | |
2580 | data->flags |= OPP_CONFIG_PROP_NAME; |
2581 | } |
2582 | |
2583 | /* Configure config_regulators helper */ |
2584 | if (config->config_regulators) { |
		ret = _opp_set_config_regulators_helper(opp_table, dev,
						config->config_regulators);
2587 | if (ret) |
2588 | goto err; |
2589 | |
2590 | data->flags |= OPP_CONFIG_REGULATOR_HELPER; |
2591 | } |
2592 | |
2593 | /* Configure supported hardware */ |
2594 | if (config->supported_hw) { |
		ret = _opp_set_supported_hw(opp_table, config->supported_hw,
					    config->supported_hw_count);
2597 | if (ret) |
2598 | goto err; |
2599 | |
2600 | data->flags |= OPP_CONFIG_SUPPORTED_HW; |
2601 | } |
2602 | |
2603 | /* Configure supplies */ |
2604 | if (config->regulator_names) { |
		ret = _opp_set_regulators(opp_table, dev,
					  config->regulator_names);
2607 | if (ret) |
2608 | goto err; |
2609 | |
2610 | data->flags |= OPP_CONFIG_REGULATOR; |
2611 | } |
2612 | |
2613 | /* Attach genpds */ |
2614 | if (config->genpd_names) { |
		/* genpd_names can't be used together with required_devs */
		if (config->required_devs) {
			ret = -EINVAL;
			goto err;
		}

		ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
					config->virt_devs);
2620 | if (ret) |
2621 | goto err; |
2622 | |
2623 | data->flags |= OPP_CONFIG_GENPD; |
2624 | } else if (config->required_devs) { |
		ret = _opp_set_required_devs(opp_table, dev,
					     config->required_devs);
2627 | if (ret) |
2628 | goto err; |
2629 | |
2630 | data->flags |= OPP_CONFIG_REQUIRED_DEVS; |
2631 | } |
2632 | |
	ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
2635 | if (ret) |
2636 | goto err; |
2637 | |
2638 | return id; |
2639 | |
2640 | err: |
2641 | _opp_clear_config(data); |
2642 | return ret; |
2643 | } |
2644 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_config); |
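
/*
 * Example usage (illustrative sketch): a typical probe path sets up clock
 * and regulator names before adding any OPPs, and clears the configuration
 * with the returned token on the error/remove path. The "core" supply name
 * and the surrounding driver code are hypothetical.
 *
 *	static const char * const reg_names[] = { "core", NULL };
 *	struct dev_pm_opp_config config = {
 *		.regulator_names = reg_names,
 *	};
 *	int token;
 *
 *	token = dev_pm_opp_set_config(dev, &config);
 *	if (token < 0)
 *		return token;
 *	...
 *	dev_pm_opp_clear_config(token);
 */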
2645 | |
2646 | /** |
2647 | * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration. |
2648 | * @token: The token returned by dev_pm_opp_set_config() previously. |
2649 | * |
2650 | * This allows all device OPP configurations to be cleared at once. This must be |
2651 | * called once for each call made to dev_pm_opp_set_config(), in order to free |
2652 | * the OPPs properly. |
2653 | * |
2654 | * Currently the first call itself ends up freeing all the OPP configurations, |
2655 | * while the later ones only drop the OPP table reference. This works well for |
 * now as we would never want to use a half-initialized OPP table and want to
2657 | * remove the configurations together. |
2658 | */ |
2659 | void dev_pm_opp_clear_config(int token) |
2660 | { |
2661 | struct opp_config_data *data; |
2662 | |
2663 | /* |
2664 | * This lets the callers call this unconditionally and keep their code |
2665 | * simple. |
2666 | */ |
2667 | if (unlikely(token <= 0)) |
2668 | return; |
2669 | |
	data = xa_erase(&opp_configs, token);
2671 | if (WARN_ON(!data)) |
2672 | return; |
2673 | |
2674 | _opp_clear_config(data); |
2675 | } |
2676 | EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config); |
2677 | |
2678 | static void devm_pm_opp_config_release(void *token) |
2679 | { |
2680 | dev_pm_opp_clear_config((unsigned long)token); |
2681 | } |
2682 | |
2683 | /** |
2684 | * devm_pm_opp_set_config() - Set OPP configuration for the device. |
2685 | * @dev: Device for which configuration is being set. |
2686 | * @config: OPP configuration. |
2687 | * |
2688 | * This allows all device OPP configurations to be performed at once. |
2689 | * This is a resource-managed variant of dev_pm_opp_set_config(). |
2690 | * |
 * Return: 0 on success and errno otherwise.
2692 | */ |
2693 | int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) |
2694 | { |
2695 | int token = dev_pm_opp_set_config(dev, config); |
2696 | |
2697 | if (token < 0) |
2698 | return token; |
2699 | |
2700 | return devm_add_action_or_reset(dev, devm_pm_opp_config_release, |
2701 | (void *) ((unsigned long) token)); |
2702 | } |
2703 | EXPORT_SYMBOL_GPL(devm_pm_opp_set_config); |
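
/*
 * Example usage (illustrative sketch): the devm variant removes the token
 * bookkeeping, as the configuration is cleared automatically on driver
 * detach. The config variable is hypothetical.
 *
 *	ret = devm_pm_opp_set_config(dev, &config);
 *	if (ret)
 *		return ret;
 */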
2704 | |
2705 | /** |
2706 | * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP. |
2707 | * @src_table: OPP table which has @dst_table as one of its required OPP table. |
2708 | * @dst_table: Required OPP table of the @src_table. |
2709 | * @src_opp: OPP from the @src_table. |
2710 | * |
2711 | * This function returns the OPP (present in @dst_table) pointed out by the |
2712 | * "required-opps" property of the @src_opp (present in @src_table). |
2713 | * |
2714 | * The callers are required to call dev_pm_opp_put() for the returned OPP after |
2715 | * use. |
2716 | * |
 * Return: pointer to 'struct dev_pm_opp' on success and errno otherwise.
2718 | */ |
2719 | struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, |
2720 | struct opp_table *dst_table, |
2721 | struct dev_pm_opp *src_opp) |
2722 | { |
	struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
2724 | int i; |
2725 | |
2726 | if (!src_table || !dst_table || !src_opp || |
2727 | !src_table->required_opp_tables) |
		return ERR_PTR(-EINVAL);
2729 | |
2730 | /* required-opps not fully initialized yet */ |
	if (lazy_linking_pending(src_table))
		return ERR_PTR(-EBUSY);
2733 | |
2734 | for (i = 0; i < src_table->required_opp_count; i++) { |
2735 | if (src_table->required_opp_tables[i] == dst_table) { |
2736 | mutex_lock(&src_table->lock); |
2737 | |
2738 | list_for_each_entry(opp, &src_table->opp_list, node) { |
2739 | if (opp == src_opp) { |
2740 | dest_opp = opp->required_opps[i]; |
					dev_pm_opp_get(dest_opp);
2742 | break; |
2743 | } |
2744 | } |
2745 | |
			mutex_unlock(&src_table->lock);
2747 | break; |
2748 | } |
2749 | } |
2750 | |
	if (IS_ERR(dest_opp)) {
		pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
2753 | src_table, dst_table); |
2754 | } |
2755 | |
2756 | return dest_opp; |
2757 | } |
2758 | EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp); |
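
/*
 * Example usage (illustrative sketch): translate a device's current OPP to
 * the matching OPP in a required (e.g. genpd) table. The table pointers and
 * the source OPP are assumed to be held by the caller.
 *
 *	dst_opp = dev_pm_opp_xlate_required_opp(src_table, dst_table, opp);
 *	if (!IS_ERR(dst_opp)) {
 *		...
 *		dev_pm_opp_put(dst_opp);
 *	}
 */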
2759 | |
2760 | /** |
2761 | * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table. |
2762 | * @src_table: OPP table which has dst_table as one of its required OPP table. |
2763 | * @dst_table: Required OPP table of the src_table. |
2764 | * @pstate: Current performance state of the src_table. |
2765 | * |
 * This returns the pstate of the OPP (present in @dst_table) pointed out by
 * the "required-opps" property of the OPP (present in @src_table) which has
 * its performance state set to @pstate.
2769 | * |
2770 | * Return: Zero or positive performance state on success, otherwise negative |
2771 | * value on errors. |
2772 | */ |
2773 | int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, |
2774 | struct opp_table *dst_table, |
2775 | unsigned int pstate) |
2776 | { |
2777 | struct dev_pm_opp *opp; |
2778 | int dest_pstate = -EINVAL; |
2779 | int i; |
2780 | |
2781 | /* |
 * Normally the src_table will have the "required-opps" property set to
 * point to one of the OPPs in the dst_table, but in some cases the
 * genpd and its master have a one-to-one mapping of performance states
 * and so none of them have the "required-opps" property set. Return the
 * pstate of the src_table as it is in such cases.
2787 | */ |
2788 | if (!src_table || !src_table->required_opp_count) |
2789 | return pstate; |
2790 | |
2791 | /* Both OPP tables must belong to genpds */ |
2792 | if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) { |
2793 | pr_err("%s: Performance state is only valid for genpds.\n" , __func__); |
2794 | return -EINVAL; |
2795 | } |
2796 | |
2797 | /* required-opps not fully initialized yet */ |
	if (lazy_linking_pending(src_table))
2799 | return -EBUSY; |
2800 | |
2801 | for (i = 0; i < src_table->required_opp_count; i++) { |
2802 | if (src_table->required_opp_tables[i]->np == dst_table->np) |
2803 | break; |
2804 | } |
2805 | |
2806 | if (unlikely(i == src_table->required_opp_count)) { |
2807 | pr_err("%s: Couldn't find matching OPP table (%p: %p)\n" , |
2808 | __func__, src_table, dst_table); |
2809 | return -EINVAL; |
2810 | } |
2811 | |
2812 | mutex_lock(&src_table->lock); |
2813 | |
2814 | list_for_each_entry(opp, &src_table->opp_list, node) { |
2815 | if (opp->level == pstate) { |
2816 | dest_pstate = opp->required_opps[i]->level; |
2817 | goto unlock; |
2818 | } |
2819 | } |
2820 | |
2821 | pr_err("%s: Couldn't find matching OPP (%p: %p)\n" , __func__, src_table, |
2822 | dst_table); |
2823 | |
2824 | unlock: |
	mutex_unlock(&src_table->lock);
2826 | |
2827 | return dest_pstate; |
2828 | } |
2829 | |
2830 | /** |
 * dev_pm_opp_add_dynamic() - Add a dynamically-created OPP to the OPP table
2832 | * @dev: The device for which we do this operation |
2833 | * @data: The OPP data for the OPP to add |
2834 | * |
2835 | * This function adds an opp definition to the opp table and returns status. |
2836 | * The opp is made available by default and it can be controlled using |
2837 | * dev_pm_opp_enable/disable functions. |
2838 | * |
2839 | * Return: |
2840 | * 0 On success OR |
2841 | * Duplicate OPPs (both freq and volt are same) and opp->available |
2842 | * -EEXIST Freq are same and volt are different OR |
2843 | * Duplicate OPPs (both freq and volt are same) and !opp->available |
2844 | * -ENOMEM Memory allocation failure |
2845 | */ |
2846 | int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data) |
2847 | { |
2848 | struct opp_table *opp_table; |
2849 | int ret; |
2850 | |
	opp_table = _add_opp_table(dev, true);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);
2854 | |
2855 | /* Fix regulator count for dynamic OPPs */ |
2856 | opp_table->regulator_count = 1; |
2857 | |
	ret = _opp_add_v1(opp_table, dev, data, true);
2859 | if (ret) |
2860 | dev_pm_opp_put_opp_table(opp_table); |
2861 | |
2862 | return ret; |
2863 | } |
2864 | EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic); |
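
/*
 * Example usage (illustrative sketch; the rates and voltages below are
 * hypothetical): dynamically populate OPPs at runtime instead of parsing
 * them from DT.
 *
 *	struct dev_pm_opp_data data = {
 *		.freq = 400000000,
 *		.u_volt = 900000,
 *	};
 *
 *	ret = dev_pm_opp_add_dynamic(dev, &data);
 *	if (ret)
 *		return ret;
 *
 *	data.freq = 800000000;
 *	data.u_volt = 1100000;
 *	ret = dev_pm_opp_add_dynamic(dev, &data);
 */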
2865 | |
2866 | /** |
2867 | * _opp_set_availability() - helper to set the availability of an opp |
2868 | * @dev: device for which we do this operation |
2869 | * @freq: OPP frequency to modify availability |
2870 | * @availability_req: availability status requested for this opp |
2871 | * |
2872 | * Set the availability of an OPP, opp_{enable,disable} share a common logic |
2873 | * which is isolated here. |
2874 | * |
2875 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
2876 | * copy operation, returns 0 if no modification was done OR modification was |
2877 | * successful. |
2878 | */ |
2879 | static int _opp_set_availability(struct device *dev, unsigned long freq, |
2880 | bool availability_req) |
2881 | { |
2882 | struct opp_table *opp_table; |
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2884 | int r = 0; |
2885 | |
2886 | /* Find the opp_table */ |
2887 | opp_table = _find_opp_table(dev); |
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2891 | return r; |
2892 | } |
2893 | |
2894 | if (!assert_single_clk(opp_table)) { |
2895 | r = -EINVAL; |
2896 | goto put_table; |
2897 | } |
2898 | |
2899 | mutex_lock(&opp_table->lock); |
2900 | |
2901 | /* Do we have the frequency? */ |
2902 | list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { |
2903 | if (tmp_opp->rates[0] == freq) { |
2904 | opp = tmp_opp; |
2905 | break; |
2906 | } |
2907 | } |
2908 | |
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
2911 | goto unlock; |
2912 | } |
2913 | |
2914 | /* Is update really needed? */ |
2915 | if (opp->available == availability_req) |
2916 | goto unlock; |
2917 | |
2918 | opp->available = availability_req; |
2919 | |
2920 | dev_pm_opp_get(opp); |
	mutex_unlock(&opp_table->lock);
2922 | |
2923 | /* Notify the change of the OPP availability */ |
2924 | if (availability_req) |
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);
2930 | |
2931 | dev_pm_opp_put(opp); |
2932 | goto put_table; |
2933 | |
2934 | unlock: |
2935 | mutex_unlock(lock: &opp_table->lock); |
2936 | put_table: |
2937 | dev_pm_opp_put_opp_table(opp_table); |
2938 | return r; |
2939 | } |
2940 | |
2941 | /** |
2942 | * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP |
2943 | * @dev: device for which we do this operation |
2944 | * @freq: OPP frequency to adjust voltage of |
2945 | * @u_volt: new OPP target voltage |
2946 | * @u_volt_min: new OPP min voltage |
2947 | * @u_volt_max: new OPP max voltage |
2948 | * |
2949 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
 * copy operation, returns 0 if no modification was done OR modification was
2951 | * successful. |
2952 | */ |
2953 | int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, |
2954 | unsigned long u_volt, unsigned long u_volt_min, |
2955 | unsigned long u_volt_max) |
2956 | |
2957 | { |
2958 | struct opp_table *opp_table; |
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2960 | int r = 0; |
2961 | |
2962 | /* Find the opp_table */ |
2963 | opp_table = _find_opp_table(dev); |
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2967 | return r; |
2968 | } |
2969 | |
2970 | if (!assert_single_clk(opp_table)) { |
2971 | r = -EINVAL; |
2972 | goto put_table; |
2973 | } |
2974 | |
2975 | mutex_lock(&opp_table->lock); |
2976 | |
2977 | /* Do we have the frequency? */ |
2978 | list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { |
2979 | if (tmp_opp->rates[0] == freq) { |
2980 | opp = tmp_opp; |
2981 | break; |
2982 | } |
2983 | } |
2984 | |
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
2987 | goto adjust_unlock; |
2988 | } |
2989 | |
2990 | /* Is update really needed? */ |
2991 | if (opp->supplies->u_volt == u_volt) |
2992 | goto adjust_unlock; |
2993 | |
2994 | opp->supplies->u_volt = u_volt; |
2995 | opp->supplies->u_volt_min = u_volt_min; |
2996 | opp->supplies->u_volt_max = u_volt_max; |
2997 | |
2998 | dev_pm_opp_get(opp); |
	mutex_unlock(&opp_table->lock);
3000 | |
3001 | /* Notify the voltage change of the OPP */ |
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
				     opp);
3004 | |
3005 | dev_pm_opp_put(opp); |
3006 | goto put_table; |
3007 | |
3008 | adjust_unlock: |
	mutex_unlock(&opp_table->lock);
3010 | put_table: |
3011 | dev_pm_opp_put_opp_table(opp_table); |
3012 | return r; |
3013 | } |
3014 | EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); |
3015 | |
3016 | /** |
3017 | * dev_pm_opp_sync_regulators() - Sync state of voltage regulators |
3018 | * @dev: device for which we do this operation |
3019 | * |
3020 | * Sync voltage state of the OPP table regulators. |
3021 | * |
3022 | * Return: 0 on success or a negative error value. |
3023 | */ |
3024 | int dev_pm_opp_sync_regulators(struct device *dev) |
3025 | { |
3026 | struct opp_table *opp_table; |
3027 | struct regulator *reg; |
3028 | int i, ret = 0; |
3029 | |
3030 | /* Device may not have OPP table */ |
3031 | opp_table = _find_opp_table(dev); |
	if (IS_ERR(opp_table))
3033 | return 0; |
3034 | |
3035 | /* Regulator may not be required for the device */ |
3036 | if (unlikely(!opp_table->regulators)) |
3037 | goto put_table; |
3038 | |
3039 | /* Nothing to sync if voltage wasn't changed */ |
3040 | if (!opp_table->enabled) |
3041 | goto put_table; |
3042 | |
3043 | for (i = 0; i < opp_table->regulator_count; i++) { |
3044 | reg = opp_table->regulators[i]; |
		ret = regulator_sync_voltage(reg);
3046 | if (ret) |
3047 | break; |
3048 | } |
3049 | put_table: |
3050 | /* Drop reference taken by _find_opp_table() */ |
3051 | dev_pm_opp_put_opp_table(opp_table); |
3052 | |
3053 | return ret; |
3054 | } |
3055 | EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); |
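
/*
 * Example usage (illustrative sketch; the resume callback is hypothetical):
 * typically called from a driver's resume path, where firmware may have
 * reset the regulators behind the kernel's back.
 *
 *	static int my_driver_resume(struct device *dev)
 *	{
 *		return dev_pm_opp_sync_regulators(dev);
 *	}
 */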
3056 | |
3057 | /** |
3058 | * dev_pm_opp_enable() - Enable a specific OPP |
3059 | * @dev: device for which we do this operation |
3060 | * @freq: OPP frequency to enable |
3061 | * |
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
3065 | * |
3066 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
3067 | * copy operation, returns 0 if no modification was done OR modification was |
3068 | * successful. |
3069 | */ |
3070 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
3071 | { |
	return _opp_set_availability(dev, freq, true);
3073 | } |
3074 | EXPORT_SYMBOL_GPL(dev_pm_opp_enable); |
3075 | |
3076 | /** |
3077 | * dev_pm_opp_disable() - Disable a specific OPP |
3078 | * @dev: device for which we do this operation |
3079 | * @freq: OPP frequency to disable |
3080 | * |
3081 | * Disables a provided opp. If the operation is valid, this returns |
3082 | * 0, else the corresponding error value. It is meant to be a temporary |
3083 | * control by users to make this OPP not available until the circumstances are |
3084 | * right to make it available again (with a call to dev_pm_opp_enable). |
3085 | * |
3086 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
3087 | * copy operation, returns 0 if no modification was done OR modification was |
3088 | * successful. |
3089 | */ |
3090 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
3091 | { |
	return _opp_set_availability(dev, freq, false);
3093 | } |
3094 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
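
/*
 * Example usage (illustrative sketch; the 800 MHz rate is hypothetical):
 * temporarily blacklist an OPP, e.g. on a thermal event, and restore it
 * once conditions allow.
 *
 *	dev_pm_opp_disable(dev, 800000000);
 *	...
 *	dev_pm_opp_enable(dev, 800000000);
 */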
3095 | |
3096 | /** |
3097 | * dev_pm_opp_register_notifier() - Register OPP notifier for the device |
3098 | * @dev: Device for which notifier needs to be registered |
3099 | * @nb: Notifier block to be registered |
3100 | * |
3101 | * Return: 0 on success or a negative error value. |
3102 | */ |
3103 | int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) |
3104 | { |
3105 | struct opp_table *opp_table; |
3106 | int ret; |
3107 | |
3108 | opp_table = _find_opp_table(dev); |
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);
3111 | |
	ret = blocking_notifier_chain_register(&opp_table->head, nb);
3113 | |
3114 | dev_pm_opp_put_opp_table(opp_table); |
3115 | |
3116 | return ret; |
3117 | } |
3118 | EXPORT_SYMBOL(dev_pm_opp_register_notifier); |
3119 | |
3120 | /** |
3121 | * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device |
3122 | * @dev: Device for which notifier needs to be unregistered |
3123 | * @nb: Notifier block to be unregistered |
3124 | * |
3125 | * Return: 0 on success or a negative error value. |
3126 | */ |
3127 | int dev_pm_opp_unregister_notifier(struct device *dev, |
3128 | struct notifier_block *nb) |
3129 | { |
3130 | struct opp_table *opp_table; |
3131 | int ret; |
3132 | |
3133 | opp_table = _find_opp_table(dev); |
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);
3136 | |
	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
3138 | |
3139 | dev_pm_opp_put_opp_table(opp_table); |
3140 | |
3141 | return ret; |
3142 | } |
3143 | EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); |
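
/*
 * Example usage (illustrative sketch; the callback and variable names are
 * hypothetical): a notifier receives one of the OPP_EVENT_* values with the
 * affected OPP as the data pointer.
 *
 *	static int my_opp_notifier(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		if (event == OPP_EVENT_DISABLE)
 *			pr_info("OPP %lu disabled\n", dev_pm_opp_get_freq(opp));
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_opp_notifier,
 *	};
 *
 *	ret = dev_pm_opp_register_notifier(dev, &my_nb);
 */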
3144 | |
3145 | /** |
3146 | * dev_pm_opp_remove_table() - Free all OPPs associated with the device |
3147 | * @dev: device pointer used to lookup OPP table. |
3148 | * |
3149 | * Free both OPPs created using static entries present in DT and the |
3150 | * dynamically added entries. |
3151 | */ |
3152 | void dev_pm_opp_remove_table(struct device *dev) |
3153 | { |
3154 | struct opp_table *opp_table; |
3155 | |
3156 | /* Check for existing table for 'dev' */ |
3157 | opp_table = _find_opp_table(dev); |
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
3166 | return; |
3167 | } |
3168 | |
3169 | /* |
3170 | * Drop the extra reference only if the OPP table was successfully added |
3171 | * with dev_pm_opp_of_add_table() earlier. |
3172 | **/ |
3173 | if (_opp_remove_all_static(opp_table)) |
3174 | dev_pm_opp_put_opp_table(opp_table); |
3175 | |
3176 | /* Drop reference taken by _find_opp_table() */ |
3177 | dev_pm_opp_put_opp_table(opp_table); |
3178 | } |
3179 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); |
3180 | |