1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * linux/include/linux/clk.h |
4 | * |
5 | * Copyright (C) 2004 ARM Limited. |
6 | * Written by Deep Blue Solutions Limited. |
7 | * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> |
8 | */ |
9 | #ifndef __LINUX_CLK_H |
10 | #define __LINUX_CLK_H |
11 | |
12 | #include <linux/err.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/notifier.h> |
15 | |
16 | struct device; |
17 | struct clk; |
18 | struct device_node; |
19 | struct of_phandle_args; |
20 | |
21 | /** |
22 | * DOC: clk notifier callback types |
23 | * |
24 | * PRE_RATE_CHANGE - called immediately before the clk rate is changed, |
25 | * to indicate that the rate change will proceed. Drivers must |
26 | * immediately terminate any operations that will be affected by the |
27 | * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK, |
28 | * NOTIFY_STOP or NOTIFY_BAD. |
29 | * |
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
31 | * after PRE_RATE_CHANGE. In this case, all registered notifiers on |
32 | * the clk will be called with ABORT_RATE_CHANGE. Callbacks must |
33 | * always return NOTIFY_DONE or NOTIFY_OK. |
34 | * |
35 | * POST_RATE_CHANGE - called after the clk rate change has successfully |
36 | * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK. |
37 | * |
38 | */ |
39 | #define PRE_RATE_CHANGE BIT(0) |
40 | #define POST_RATE_CHANGE BIT(1) |
41 | #define ABORT_RATE_CHANGE BIT(2) |
42 | |
43 | /** |
44 | * struct clk_notifier - associate a clk with a notifier |
45 | * @clk: struct clk * to associate the notifier with |
46 | * @notifier_head: a blocking_notifier_head for this clk |
47 | * @node: linked list pointers |
48 | * |
49 | * A list of struct clk_notifier is maintained by the notifier code. |
50 | * An entry is created whenever code registers the first notifier on a |
51 | * particular @clk. Future notifiers on that @clk are added to the |
52 | * @notifier_head. |
53 | */ |
54 | struct clk_notifier { |
55 | struct clk *clk; |
56 | struct srcu_notifier_head notifier_head; |
57 | struct list_head node; |
58 | }; |
59 | |
60 | /** |
61 | * struct clk_notifier_data - rate data to pass to the notifier callback |
62 | * @clk: struct clk * being changed |
63 | * @old_rate: previous rate of this clk |
64 | * @new_rate: new rate of this clk |
65 | * |
66 | * For a pre-notifier, old_rate is the clk's rate before this rate |
67 | * change, and new_rate is what the rate will be in the future. For a |
68 | * post-notifier, old_rate and new_rate are both set to the clk's |
69 | * current rate (this was done to optimize the implementation). |
70 | */ |
71 | struct clk_notifier_data { |
72 | struct clk *clk; |
73 | unsigned long old_rate; |
74 | unsigned long new_rate; |
75 | }; |
76 | |
77 | /** |
78 | * struct clk_bulk_data - Data used for bulk clk operations. |
79 | * |
80 | * @id: clock consumer ID |
81 | * @clk: struct clk * to store the associated clock |
82 | * |
83 | * The CLK APIs provide a series of clk_bulk_() API calls as |
84 | * a convenience to consumers which require multiple clks. This |
85 | * structure is used to manage data for these calls. |
86 | */ |
87 | struct clk_bulk_data { |
88 | const char *id; |
89 | struct clk *clk; |
90 | }; |
91 | |
92 | #ifdef CONFIG_COMMON_CLK |
93 | |
94 | /** |
95 | * clk_notifier_register - register a clock rate-change notifier callback |
96 | * @clk: clock whose rate we are interested in |
97 | * @nb: notifier block with callback function pointer |
98 | * |
99 | * ProTip: debugging across notifier chains can be frustrating. Make sure that |
100 | * your notifier callback function prints a nice big warning in case of |
101 | * failure. |
102 | */ |
103 | int clk_notifier_register(struct clk *clk, struct notifier_block *nb); |
104 | |
105 | /** |
106 | * clk_notifier_unregister - unregister a clock rate-change notifier callback |
107 | * @clk: clock whose rate we are no longer interested in |
108 | * @nb: notifier block which will be unregistered |
109 | */ |
110 | int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); |
111 | |
112 | /** |
113 | * devm_clk_notifier_register - register a managed rate-change notifier callback |
114 | * @dev: device for clock "consumer" |
115 | * @clk: clock whose rate we are interested in |
116 | * @nb: notifier block with callback function pointer |
117 | * |
118 | * Returns 0 on success, -EERROR otherwise |
119 | */ |
120 | int devm_clk_notifier_register(struct device *dev, struct clk *clk, |
121 | struct notifier_block *nb); |
122 | |
123 | /** |
124 | * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) |
125 | * for a clock source. |
126 | * @clk: clock source |
127 | * |
128 | * This gets the clock source accuracy expressed in ppb. |
129 | * A perfect clock returns 0. |
130 | */ |
131 | long clk_get_accuracy(struct clk *clk); |
132 | |
133 | /** |
134 | * clk_set_phase - adjust the phase shift of a clock signal |
135 | * @clk: clock signal source |
136 | * @degrees: number of degrees the signal is shifted |
137 | * |
138 | * Shifts the phase of a clock signal by the specified degrees. Returns 0 on |
139 | * success, -EERROR otherwise. |
140 | */ |
141 | int clk_set_phase(struct clk *clk, int degrees); |
142 | |
143 | /** |
144 | * clk_get_phase - return the phase shift of a clock signal |
145 | * @clk: clock signal source |
146 | * |
147 | * Returns the phase shift of a clock node in degrees, otherwise returns |
148 | * -EERROR. |
149 | */ |
150 | int clk_get_phase(struct clk *clk); |
151 | |
152 | /** |
153 | * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal |
154 | * @clk: clock signal source |
155 | * @num: numerator of the duty cycle ratio to be applied |
156 | * @den: denominator of the duty cycle ratio to be applied |
157 | * |
158 | * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on |
159 | * success, -EERROR otherwise. |
160 | */ |
161 | int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); |
162 | |
163 | /** |
164 | * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal |
165 | * @clk: clock signal source |
166 | * @scale: scaling factor to be applied to represent the ratio as an integer |
167 | * |
168 | * Returns the duty cycle ratio multiplied by the scale provided, otherwise |
169 | * returns -EERROR. |
170 | */ |
171 | int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); |
172 | |
173 | /** |
174 | * clk_is_match - check if two clk's point to the same hardware clock |
175 | * @p: clk compared against q |
176 | * @q: clk compared against p |
177 | * |
178 | * Returns true if the two struct clk pointers both point to the same hardware |
179 | * clock node. Put differently, returns true if @p and @q |
180 | * share the same &struct clk_core object. |
181 | * |
182 | * Returns false otherwise. Note that two NULL clks are treated as matching. |
183 | */ |
184 | bool clk_is_match(const struct clk *p, const struct clk *q); |
185 | |
186 | /** |
187 | * clk_rate_exclusive_get - get exclusivity over the rate control of a |
188 | * producer |
189 | * @clk: clock source |
190 | * |
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
197 | * |
198 | * Must not be called from within atomic context. |
199 | * |
200 | * Returns success (0) or negative errno. |
201 | */ |
202 | int clk_rate_exclusive_get(struct clk *clk); |
203 | |
204 | /** |
205 | * clk_rate_exclusive_put - release exclusivity over the rate control of a |
206 | * producer |
207 | * @clk: clock source |
208 | * |
209 | * This function allows drivers to release the exclusivity it previously got |
210 | * from clk_rate_exclusive_get() |
211 | * |
212 | * The caller must balance the number of clk_rate_exclusive_get() and |
213 | * clk_rate_exclusive_put() calls. |
214 | * |
215 | * Must not be called from within atomic context. |
216 | */ |
217 | void clk_rate_exclusive_put(struct clk *clk); |
218 | |
219 | #else |
220 | |
/* !CONFIG_COMMON_CLK stub: rate-change notifiers are unsupported. */
static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}
226 | |
/* !CONFIG_COMMON_CLK stub: nothing can have been registered. */
static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}
232 | |
/* !CONFIG_COMMON_CLK stub: managed notifier registration is unsupported. */
static inline int devm_clk_notifier_register(struct device *dev,
					     struct clk *clk,
					     struct notifier_block *nb)
{
	return -ENOTSUPP;
}
239 | |
/* !CONFIG_COMMON_CLK stub: clock accuracy is unknown, report unsupported. */
static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}
244 | |
245 | static inline long clk_set_phase(struct clk *clk, int phase) |
246 | { |
247 | return -ENOTSUPP; |
248 | } |
249 | |
250 | static inline long clk_get_phase(struct clk *clk) |
251 | { |
252 | return -ENOTSUPP; |
253 | } |
254 | |
/* !CONFIG_COMMON_CLK stub: duty cycle control is unsupported. */
static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}
260 | |
/*
 * !CONFIG_COMMON_CLK stub for clk_get_scaled_duty_cycle().
 *
 * Return int (not unsigned int) so the stub's signature matches the
 * CONFIG_COMMON_CLK declaration above, which documents a negative
 * errno on failure; the stub reports a scaled ratio of 0.
 */
static inline int clk_get_scaled_duty_cycle(struct clk *clk,
					    unsigned int scale)
{
	return 0;
}
266 | |
/*
 * !CONFIG_COMMON_CLK stub: with no clk_core backing, two clks match
 * only if they are the very same pointer (two NULL clks match).
 */
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}
271 | |
/* !CONFIG_COMMON_CLK stub: no rate control, so exclusivity trivially succeeds. */
static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}
276 | |
/* !CONFIG_COMMON_CLK stub: balances the clk_rate_exclusive_get() stub; no-op. */
static inline void clk_rate_exclusive_put(struct clk *clk) {}
278 | |
279 | #endif |
280 | |
281 | #ifdef CONFIG_HAVE_CLK_PREPARE |
282 | /** |
283 | * clk_prepare - prepare a clock source |
284 | * @clk: clock source |
285 | * |
286 | * This prepares the clock source for use. |
287 | * |
288 | * Must not be called from within atomic context. |
289 | */ |
290 | int clk_prepare(struct clk *clk); |
291 | int __must_check clk_bulk_prepare(int num_clks, |
292 | const struct clk_bulk_data *clks); |
293 | |
294 | /** |
295 | * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. |
296 | * @clk: clock source |
297 | * |
298 | * Returns true if clk_prepare() implicitly enables the clock, effectively |
299 | * making clk_enable()/clk_disable() no-ops, false otherwise. |
300 | * |
301 | * This is of interest mainly to the power management code where actually |
302 | * disabling the clock also requires unpreparing it to have any material |
303 | * effect. |
304 | * |
305 | * Regardless of the value returned here, the caller must always invoke |
306 | * clk_enable() or clk_prepare_enable() and counterparts for usage counts |
307 | * to be right. |
308 | */ |
309 | bool clk_is_enabled_when_prepared(struct clk *clk); |
310 | #else |
/* !CONFIG_HAVE_CLK_PREPARE stub: keep the may-sleep contract, then succeed. */
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}
316 | |
/* !CONFIG_HAVE_CLK_PREPARE stub: as clk_prepare(), for a set of clks. */
static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
323 | |
/* !CONFIG_HAVE_CLK_PREPARE stub: preparing never implicitly enables. */
static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return false;
}
328 | #endif |
329 | |
330 | /** |
331 | * clk_unprepare - undo preparation of a clock source |
332 | * @clk: clock source |
333 | * |
334 | * This undoes a previously prepared clock. The caller must balance |
335 | * the number of prepare and unprepare calls. |
336 | * |
337 | * Must not be called from within atomic context. |
338 | */ |
339 | #ifdef CONFIG_HAVE_CLK_PREPARE |
340 | void clk_unprepare(struct clk *clk); |
341 | void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); |
342 | #else |
/* !CONFIG_HAVE_CLK_PREPARE stub: keep the may-sleep contract; nothing to undo. */
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
/* !CONFIG_HAVE_CLK_PREPARE stub: as clk_unprepare(), for a set of clks. */
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
352 | #endif |
353 | |
354 | #ifdef CONFIG_HAVE_CLK |
355 | /** |
356 | * clk_get - lookup and obtain a reference to a clock producer. |
357 | * @dev: device for clock "consumer" |
358 | * @id: clock consumer ID |
359 | * |
360 | * Returns a struct clk corresponding to the clock producer, or |
361 | * valid IS_ERR() condition containing errno. The implementation |
362 | * uses @dev and @id to determine the clock consumer, and thereby |
363 | * the clock producer. (IOW, @id may be identical strings, but |
364 | * clk_get may return different clock producers depending on @dev.) |
365 | * |
366 | * Drivers must assume that the clock source is not enabled. |
367 | * |
368 | * clk_get should not be called from within interrupt context. |
369 | */ |
370 | struct clk *clk_get(struct device *dev, const char *id); |
371 | |
372 | /** |
373 | * clk_bulk_get - lookup and obtain a number of references to clock producer. |
374 | * @dev: device for clock "consumer" |
375 | * @num_clks: the number of clk_bulk_data |
376 | * @clks: the clk_bulk_data table of consumer |
377 | * |
378 | * This helper function allows drivers to get several clk consumers in one |
379 | * operation. If any of the clk cannot be acquired then any clks |
380 | * that were obtained will be freed before returning to the caller. |
381 | * |
382 | * Returns 0 if all clocks specified in clk_bulk_data table are obtained |
383 | * successfully, or valid IS_ERR() condition containing errno. |
384 | * The implementation uses @dev and @clk_bulk_data.id to determine the |
385 | * clock consumer, and thereby the clock producer. |
386 | * The clock returned is stored in each @clk_bulk_data.clk field. |
387 | * |
388 | * Drivers must assume that the clock source is not enabled. |
389 | * |
390 | * clk_bulk_get should not be called from within interrupt context. |
391 | */ |
392 | int __must_check clk_bulk_get(struct device *dev, int num_clks, |
393 | struct clk_bulk_data *clks); |
394 | /** |
395 | * clk_bulk_get_all - lookup and obtain all available references to clock |
396 | * producer. |
397 | * @dev: device for clock "consumer" |
398 | * @clks: pointer to the clk_bulk_data table of consumer |
399 | * |
400 | * This helper function allows drivers to get all clk consumers in one |
401 | * operation. If any of the clk cannot be acquired then any clks |
402 | * that were obtained will be freed before returning to the caller. |
403 | * |
404 | * Returns a positive value for the number of clocks obtained while the |
405 | * clock references are stored in the clk_bulk_data table in @clks field. |
406 | * Returns 0 if there're none and a negative value if something failed. |
407 | * |
408 | * Drivers must assume that the clock source is not enabled. |
409 | * |
 * clk_bulk_get_all should not be called from within interrupt context.
411 | */ |
412 | int __must_check clk_bulk_get_all(struct device *dev, |
413 | struct clk_bulk_data **clks); |
414 | |
415 | /** |
416 | * clk_bulk_get_optional - lookup and obtain a number of references to clock producer |
417 | * @dev: device for clock "consumer" |
418 | * @num_clks: the number of clk_bulk_data |
419 | * @clks: the clk_bulk_data table of consumer |
420 | * |
421 | * Behaves the same as clk_bulk_get() except where there is no clock producer. |
422 | * In this case, instead of returning -ENOENT, the function returns 0 and |
423 | * NULL for a clk for which a clock producer could not be determined. |
424 | */ |
425 | int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, |
426 | struct clk_bulk_data *clks); |
427 | /** |
428 | * devm_clk_bulk_get - managed get multiple clk consumers |
429 | * @dev: device for clock "consumer" |
430 | * @num_clks: the number of clk_bulk_data |
431 | * @clks: the clk_bulk_data table of consumer |
432 | * |
433 | * Return 0 on success, an errno on failure. |
434 | * |
435 | * This helper function allows drivers to get several clk |
436 | * consumers in one operation with management, the clks will |
437 | * automatically be freed when the device is unbound. |
438 | */ |
439 | int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, |
440 | struct clk_bulk_data *clks); |
441 | /** |
442 | * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks |
443 | * @dev: device for clock "consumer" |
444 | * @num_clks: the number of clk_bulk_data |
445 | * @clks: pointer to the clk_bulk_data table of consumer |
446 | * |
447 | * Behaves the same as devm_clk_bulk_get() except where there is no clock |
448 | * producer. In this case, instead of returning -ENOENT, the function returns |
449 | * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. |
450 | * |
451 | * Returns 0 if all clocks specified in clk_bulk_data table are obtained |
452 | * successfully or for any clk there was no clk provider available, otherwise |
453 | * returns valid IS_ERR() condition containing errno. |
454 | * The implementation uses @dev and @clk_bulk_data.id to determine the |
455 | * clock consumer, and thereby the clock producer. |
456 | * The clock returned is stored in each @clk_bulk_data.clk field. |
457 | * |
458 | * Drivers must assume that the clock source is not enabled. |
459 | * |
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
461 | */ |
462 | int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, |
463 | struct clk_bulk_data *clks); |
464 | /** |
465 | * devm_clk_bulk_get_all - managed get multiple clk consumers |
466 | * @dev: device for clock "consumer" |
467 | * @clks: pointer to the clk_bulk_data table of consumer |
468 | * |
469 | * Returns a positive value for the number of clocks obtained while the |
470 | * clock references are stored in the clk_bulk_data table in @clks field. |
471 | * Returns 0 if there're none and a negative value if something failed. |
472 | * |
473 | * This helper function allows drivers to get several clk |
474 | * consumers in one operation with management, the clks will |
475 | * automatically be freed when the device is unbound. |
476 | */ |
477 | |
478 | int __must_check devm_clk_bulk_get_all(struct device *dev, |
479 | struct clk_bulk_data **clks); |
480 | |
481 | /** |
482 | * devm_clk_get - lookup and obtain a managed reference to a clock producer. |
483 | * @dev: device for clock "consumer" |
484 | * @id: clock consumer ID |
485 | * |
486 | * Context: May sleep. |
487 | * |
488 | * Return: a struct clk corresponding to the clock producer, or |
489 | * valid IS_ERR() condition containing errno. The implementation |
490 | * uses @dev and @id to determine the clock consumer, and thereby |
491 | * the clock producer. (IOW, @id may be identical strings, but |
492 | * clk_get may return different clock producers depending on @dev.) |
493 | * |
494 | * Drivers must assume that the clock source is neither prepared nor |
495 | * enabled. |
496 | * |
497 | * The clock will automatically be freed when the device is unbound |
498 | * from the bus. |
499 | */ |
500 | struct clk *devm_clk_get(struct device *dev, const char *id); |
501 | |
502 | /** |
503 | * devm_clk_get_prepared - devm_clk_get() + clk_prepare() |
504 | * @dev: device for clock "consumer" |
505 | * @id: clock consumer ID |
506 | * |
507 | * Context: May sleep. |
508 | * |
509 | * Return: a struct clk corresponding to the clock producer, or |
510 | * valid IS_ERR() condition containing errno. The implementation |
511 | * uses @dev and @id to determine the clock consumer, and thereby |
512 | * the clock producer. (IOW, @id may be identical strings, but |
513 | * clk_get may return different clock producers depending on @dev.) |
514 | * |
515 | * The returned clk (if valid) is prepared. Drivers must however assume |
516 | * that the clock is not enabled. |
517 | * |
518 | * The clock will automatically be unprepared and freed when the device |
519 | * is unbound from the bus. |
520 | */ |
521 | struct clk *devm_clk_get_prepared(struct device *dev, const char *id); |
522 | |
523 | /** |
524 | * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() |
525 | * @dev: device for clock "consumer" |
526 | * @id: clock consumer ID |
527 | * |
528 | * Context: May sleep. |
529 | * |
530 | * Return: a struct clk corresponding to the clock producer, or |
531 | * valid IS_ERR() condition containing errno. The implementation |
532 | * uses @dev and @id to determine the clock consumer, and thereby |
533 | * the clock producer. (IOW, @id may be identical strings, but |
534 | * clk_get may return different clock producers depending on @dev.) |
535 | * |
536 | * The returned clk (if valid) is prepared and enabled. |
537 | * |
538 | * The clock will automatically be disabled, unprepared and freed |
539 | * when the device is unbound from the bus. |
540 | */ |
541 | struct clk *devm_clk_get_enabled(struct device *dev, const char *id); |
542 | |
543 | /** |
544 | * devm_clk_get_optional - lookup and obtain a managed reference to an optional |
545 | * clock producer. |
546 | * @dev: device for clock "consumer" |
547 | * @id: clock consumer ID |
548 | * |
549 | * Context: May sleep. |
550 | * |
551 | * Return: a struct clk corresponding to the clock producer, or |
552 | * valid IS_ERR() condition containing errno. The implementation |
553 | * uses @dev and @id to determine the clock consumer, and thereby |
554 | * the clock producer. If no such clk is found, it returns NULL |
555 | * which serves as a dummy clk. That's the only difference compared |
556 | * to devm_clk_get(). |
557 | * |
558 | * Drivers must assume that the clock source is neither prepared nor |
559 | * enabled. |
560 | * |
561 | * The clock will automatically be freed when the device is unbound |
562 | * from the bus. |
563 | */ |
564 | struct clk *devm_clk_get_optional(struct device *dev, const char *id); |
565 | |
566 | /** |
567 | * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() |
568 | * @dev: device for clock "consumer" |
569 | * @id: clock consumer ID |
570 | * |
571 | * Context: May sleep. |
572 | * |
573 | * Return: a struct clk corresponding to the clock producer, or |
574 | * valid IS_ERR() condition containing errno. The implementation |
575 | * uses @dev and @id to determine the clock consumer, and thereby |
576 | * the clock producer. If no such clk is found, it returns NULL |
577 | * which serves as a dummy clk. That's the only difference compared |
578 | * to devm_clk_get_prepared(). |
579 | * |
580 | * The returned clk (if valid) is prepared. Drivers must however |
581 | * assume that the clock is not enabled. |
582 | * |
583 | * The clock will automatically be unprepared and freed when the |
584 | * device is unbound from the bus. |
585 | */ |
586 | struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); |
587 | |
588 | /** |
589 | * devm_clk_get_optional_enabled - devm_clk_get_optional() + |
590 | * clk_prepare_enable() |
591 | * @dev: device for clock "consumer" |
592 | * @id: clock consumer ID |
593 | * |
594 | * Context: May sleep. |
595 | * |
596 | * Return: a struct clk corresponding to the clock producer, or |
597 | * valid IS_ERR() condition containing errno. The implementation |
598 | * uses @dev and @id to determine the clock consumer, and thereby |
599 | * the clock producer. If no such clk is found, it returns NULL |
600 | * which serves as a dummy clk. That's the only difference compared |
601 | * to devm_clk_get_enabled(). |
602 | * |
603 | * The returned clk (if valid) is prepared and enabled. |
604 | * |
605 | * The clock will automatically be disabled, unprepared and freed |
606 | * when the device is unbound from the bus. |
607 | */ |
608 | struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); |
609 | |
610 | /** |
611 | * devm_get_clk_from_child - lookup and obtain a managed reference to a |
612 | * clock producer from child node. |
613 | * @dev: device for clock "consumer" |
614 | * @np: pointer to clock consumer node |
615 | * @con_id: clock consumer ID |
616 | * |
617 | * This function parses the clocks, and uses them to look up the |
618 | * struct clk from the registered list of clock providers by using |
619 | * @np and @con_id |
620 | * |
621 | * The clock will automatically be freed when the device is unbound |
622 | * from the bus. |
623 | */ |
624 | struct clk *devm_get_clk_from_child(struct device *dev, |
625 | struct device_node *np, const char *con_id); |
626 | |
627 | /** |
628 | * clk_enable - inform the system when the clock source should be running. |
629 | * @clk: clock source |
630 | * |
631 | * If the clock can not be enabled/disabled, this should return success. |
632 | * |
633 | * May be called from atomic contexts. |
634 | * |
635 | * Returns success (0) or negative errno. |
636 | */ |
637 | int clk_enable(struct clk *clk); |
638 | |
639 | /** |
640 | * clk_bulk_enable - inform the system when the set of clks should be running. |
641 | * @num_clks: the number of clk_bulk_data |
642 | * @clks: the clk_bulk_data table of consumer |
643 | * |
644 | * May be called from atomic contexts. |
645 | * |
646 | * Returns success (0) or negative errno. |
647 | */ |
648 | int __must_check clk_bulk_enable(int num_clks, |
649 | const struct clk_bulk_data *clks); |
650 | |
651 | /** |
652 | * clk_disable - inform the system when the clock source is no longer required. |
653 | * @clk: clock source |
654 | * |
655 | * Inform the system that a clock source is no longer required by |
656 | * a driver and may be shut down. |
657 | * |
658 | * May be called from atomic contexts. |
659 | * |
660 | * Implementation detail: if the clock source is shared between |
661 | * multiple drivers, clk_enable() calls must be balanced by the |
662 | * same number of clk_disable() calls for the clock source to be |
663 | * disabled. |
664 | */ |
665 | void clk_disable(struct clk *clk); |
666 | |
667 | /** |
668 | * clk_bulk_disable - inform the system when the set of clks is no |
669 | * longer required. |
670 | * @num_clks: the number of clk_bulk_data |
671 | * @clks: the clk_bulk_data table of consumer |
672 | * |
673 | * Inform the system that a set of clks is no longer required by |
674 | * a driver and may be shut down. |
675 | * |
676 | * May be called from atomic contexts. |
677 | * |
678 | * Implementation detail: if the set of clks is shared between |
679 | * multiple drivers, clk_bulk_enable() calls must be balanced by the |
680 | * same number of clk_bulk_disable() calls for the clock source to be |
681 | * disabled. |
682 | */ |
683 | void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks); |
684 | |
685 | /** |
686 | * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. |
687 | * This is only valid once the clock source has been enabled. |
688 | * @clk: clock source |
689 | */ |
690 | unsigned long clk_get_rate(struct clk *clk); |
691 | |
692 | /** |
693 | * clk_put - "free" the clock source |
694 | * @clk: clock source |
695 | * |
696 | * Note: drivers must ensure that all clk_enable calls made on this |
697 | * clock source are balanced by clk_disable calls prior to calling |
698 | * this function. |
699 | * |
700 | * clk_put should not be called from within interrupt context. |
701 | */ |
702 | void clk_put(struct clk *clk); |
703 | |
704 | /** |
705 | * clk_bulk_put - "free" the clock source |
706 | * @num_clks: the number of clk_bulk_data |
707 | * @clks: the clk_bulk_data table of consumer |
708 | * |
709 | * Note: drivers must ensure that all clk_bulk_enable calls made on this |
710 | * clock source are balanced by clk_bulk_disable calls prior to calling |
711 | * this function. |
712 | * |
713 | * clk_bulk_put should not be called from within interrupt context. |
714 | */ |
715 | void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); |
716 | |
717 | /** |
718 | * clk_bulk_put_all - "free" all the clock source |
719 | * @num_clks: the number of clk_bulk_data |
720 | * @clks: the clk_bulk_data table of consumer |
721 | * |
722 | * Note: drivers must ensure that all clk_bulk_enable calls made on this |
723 | * clock source are balanced by clk_bulk_disable calls prior to calling |
724 | * this function. |
725 | * |
726 | * clk_bulk_put_all should not be called from within interrupt context. |
727 | */ |
728 | void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks); |
729 | |
730 | /** |
731 | * devm_clk_put - "free" a managed clock source |
732 | * @dev: device used to acquire the clock |
733 | * @clk: clock source acquired with devm_clk_get() |
734 | * |
735 | * Note: drivers must ensure that all clk_enable calls made on this |
736 | * clock source are balanced by clk_disable calls prior to calling |
737 | * this function. |
738 | * |
739 | * clk_put should not be called from within interrupt context. |
740 | */ |
741 | void devm_clk_put(struct device *dev, struct clk *clk); |
742 | |
743 | /* |
744 | * The remaining APIs are optional for machine class support. |
745 | */ |
746 | |
747 | |
748 | /** |
749 | * clk_round_rate - adjust a rate to the exact rate a clock can provide |
750 | * @clk: clock source |
751 | * @rate: desired clock rate in Hz |
752 | * |
753 | * This answers the question "if I were to pass @rate to clk_set_rate(), |
754 | * what clock rate would I end up with?" without changing the hardware |
755 | * in any way. In other words: |
756 | * |
757 | * rate = clk_round_rate(clk, r); |
758 | * |
759 | * and: |
760 | * |
761 | * clk_set_rate(clk, r); |
762 | * rate = clk_get_rate(clk); |
763 | * |
764 | * are equivalent except the former does not modify the clock hardware |
765 | * in any way. |
766 | * |
767 | * Returns rounded clock rate in Hz, or negative errno. |
768 | */ |
769 | long clk_round_rate(struct clk *clk, unsigned long rate); |
770 | |
771 | /** |
772 | * clk_set_rate - set the clock rate for a clock source |
773 | * @clk: clock source |
774 | * @rate: desired clock rate in Hz |
775 | * |
776 | * Updating the rate starts at the top-most affected clock and then |
777 | * walks the tree down to the bottom-most clock that needs updating. |
778 | * |
779 | * Returns success (0) or negative errno. |
780 | */ |
781 | int clk_set_rate(struct clk *clk, unsigned long rate); |
782 | |
783 | /** |
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
785 | * clock source |
786 | * @clk: clock source |
787 | * @rate: desired clock rate in Hz |
788 | * |
789 | * This helper function allows drivers to atomically set the rate of a producer |
790 | * and claim exclusivity over the rate control of the producer. |
791 | * |
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). The caller must balance this call with a call to
 * clk_rate_exclusive_put().
795 | * |
796 | * Returns success (0) or negative errno. |
797 | */ |
798 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); |
799 | |
800 | /** |
801 | * clk_has_parent - check if a clock is a possible parent for another |
802 | * @clk: clock source |
803 | * @parent: parent clock source |
804 | * |
805 | * This function can be used in drivers that need to check that a clock can be |
806 | * the parent of another without actually changing the parent. |
807 | * |
808 | * Returns true if @parent is a possible parent for @clk, false otherwise. |
809 | */ |
810 | bool clk_has_parent(const struct clk *clk, const struct clk *parent); |
811 | |
812 | /** |
813 | * clk_set_rate_range - set a rate range for a clock source |
814 | * @clk: clock source |
815 | * @min: desired minimum clock rate in Hz, inclusive |
816 | * @max: desired maximum clock rate in Hz, inclusive |
817 | * |
818 | * Returns success (0) or negative errno. |
819 | */ |
820 | int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max); |
821 | |
822 | /** |
823 | * clk_set_min_rate - set a minimum clock rate for a clock source |
824 | * @clk: clock source |
825 | * @rate: desired minimum clock rate in Hz, inclusive |
826 | * |
827 | * Returns success (0) or negative errno. |
828 | */ |
829 | int clk_set_min_rate(struct clk *clk, unsigned long rate); |
830 | |
831 | /** |
832 | * clk_set_max_rate - set a maximum clock rate for a clock source |
833 | * @clk: clock source |
834 | * @rate: desired maximum clock rate in Hz, inclusive |
835 | * |
836 | * Returns success (0) or negative errno. |
837 | */ |
838 | int clk_set_max_rate(struct clk *clk, unsigned long rate); |
839 | |
840 | /** |
841 | * clk_set_parent - set the parent clock source for this clock |
842 | * @clk: clock source |
843 | * @parent: parent clock source |
844 | * |
845 | * Returns success (0) or negative errno. |
846 | */ |
847 | int clk_set_parent(struct clk *clk, struct clk *parent); |
848 | |
849 | /** |
850 | * clk_get_parent - get the parent clock source for this clock |
851 | * @clk: clock source |
852 | * |
853 | * Returns struct clk corresponding to parent clock source, or |
854 | * valid IS_ERR() condition containing errno. |
855 | */ |
856 | struct clk *clk_get_parent(struct clk *clk); |
857 | |
858 | /** |
859 | * clk_get_sys - get a clock based upon the device name |
860 | * @dev_id: device name |
861 | * @con_id: connection ID |
862 | * |
863 | * Returns a struct clk corresponding to the clock producer, or |
864 | * valid IS_ERR() condition containing errno. The implementation |
865 | * uses @dev_id and @con_id to determine the clock consumer, and |
866 | * thereby the clock producer. In contrast to clk_get() this function |
867 | * takes the device name instead of the device itself for identification. |
868 | * |
869 | * Drivers must assume that the clock source is not enabled. |
870 | * |
871 | * clk_get_sys should not be called from within interrupt context. |
872 | */ |
873 | struct clk *clk_get_sys(const char *dev_id, const char *con_id); |
874 | |
875 | /** |
876 | * clk_save_context - save clock context for poweroff |
877 | * |
878 | * Saves the context of the clock register for powerstates in which the |
879 | * contents of the registers will be lost. Occurs deep within the suspend |
880 | * code so locking is not necessary. |
881 | */ |
882 | int clk_save_context(void); |
883 | |
884 | /** |
885 | * clk_restore_context - restore clock context after poweroff |
886 | * |
887 | * This occurs with all clocks enabled. Occurs deep within the resume code |
888 | * so locking is not necessary. |
889 | */ |
890 | void clk_restore_context(void); |
891 | |
892 | #else /* !CONFIG_HAVE_CLK */ |
893 | |
/*
 * !CONFIG_HAVE_CLK stubs: clock lookups report success with a NULL clk
 * (the bulk variants return 0 without touching the caller's array), so
 * consumers build and run unchanged on platforms without clock support.
 */
static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}
916 | |
/*
 * Device-managed (devm_*) lookup stubs: each hands back a NULL clk, so
 * there is nothing for the device core to release later.
 */
static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_prepared(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_enabled(struct device *dev,
					       const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
							 const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
							const char *id)
{
	return NULL;
}
951 | |
/* Device-managed bulk lookup stubs: report success, acquire nothing. */
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}
963 | |
964 | static inline int __must_check devm_clk_bulk_get_all(struct device *dev, |
965 | struct clk_bulk_data **clks) |
966 | { |
967 | |
968 | return 0; |
969 | } |
970 | |
/* Stub: no clock to look up under the child node; returns NULL. */
static inline struct clk *devm_get_clk_from_child(struct device *dev,
				 struct device_node *np, const char *con_id)
{
	return NULL;
}
976 | |
/* Release stubs: nothing was acquired, so there is nothing to put. */
static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
984 | |
/* Enable stubs: unconditionally report success. */
static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}
995 | |
/* Disable stubs: no-ops without clock support. */
static inline void clk_disable(struct clk *clk) {}


static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}
1001 | |
/* Rate stubs: queries report a 0 Hz rate, rate changes report success. */
static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

/* Rounding a rate without clock support likewise yields 0. */
static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
1021 | |
/*
 * Stub matches the const-qualified prototype declared for the
 * CONFIG_HAVE_CLK case; it unconditionally reports @parent as valid.
 */
static inline bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
	return true;
}
1026 | |
/* Range and parent configuration stubs: all report success. */
static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}
1047 | |
/* Lookup stubs return no clock; context save/restore stubs are no-ops. */
static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}
1064 | |
1065 | #endif |
1066 | |
/*
 * clk_prepare_enable - prepare and enable a clock with a single call.
 * If the enable step fails, the prepare is rolled back so that on error
 * the clock is left exactly as it was found. For use where clk_enable
 * would otherwise be called from non-atomic context.
 */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret = clk_prepare(clk);

	if (!ret) {
		ret = clk_enable(clk);
		if (ret)
			clk_unprepare(clk);
	}

	return ret;
}
1081 | |
/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	/* Undo clk_prepare_enable(): disable first, then unprepare. */
	clk_disable(clk);
	clk_unprepare(clk);
}
1088 | |
1089 | static inline int __must_check |
1090 | clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) |
1091 | { |
1092 | int ret; |
1093 | |
1094 | ret = clk_bulk_prepare(num_clks, clks); |
1095 | if (ret) |
1096 | return ret; |
1097 | ret = clk_bulk_enable(num_clks, clks); |
1098 | if (ret) |
1099 | clk_bulk_unprepare(num_clks, clks); |
1100 | |
1101 | return ret; |
1102 | } |
1103 | |
static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	/* Undo clk_bulk_prepare_enable(): disable first, then unprepare. */
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}
1110 | |
1111 | /** |
1112 | * clk_drop_range - Reset any range set on that clock |
1113 | * @clk: clock source |
1114 | * |
1115 | * Returns success (0) or negative errno. |
1116 | */ |
1117 | static inline int clk_drop_range(struct clk *clk) |
1118 | { |
1119 | return clk_set_rate_range(clk, min: 0, ULONG_MAX); |
1120 | } |
1121 | |
1122 | /** |
1123 | * clk_get_optional - lookup and obtain a reference to an optional clock |
1124 | * producer. |
1125 | * @dev: device for clock "consumer" |
1126 | * @id: clock consumer ID |
1127 | * |
1128 | * Behaves the same as clk_get() except where there is no clock producer. In |
1129 | * this case, instead of returning -ENOENT, the function returns NULL. |
1130 | */ |
1131 | static inline struct clk *clk_get_optional(struct device *dev, const char *id) |
1132 | { |
1133 | struct clk *clk = clk_get(dev, id); |
1134 | |
1135 | if (clk == ERR_PTR(error: -ENOENT)) |
1136 | return NULL; |
1137 | |
1138 | return clk; |
1139 | } |
1140 | |
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
/* Without OF + common clk support, devicetree clock lookups fail with -ENOENT. */
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif
1160 | |
1161 | #endif |
1162 | |