// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
 *
 * Baikal-T1 CCU Dividers interface driver
 */

#define pr_fmt(fmt) "bt1-ccu-div: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/time64.h>
#include <linux/debugfs.h>

#include "ccu-div.h"

#define CCU_DIV_CTL			0x00
#define CCU_DIV_CTL_EN			BIT(0)
#define CCU_DIV_CTL_RST			BIT(1)
#define CCU_DIV_CTL_SET_CLKDIV		BIT(2)
#define CCU_DIV_CTL_CLKDIV_FLD		4
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
	GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED	BIT(27)
#define CCU_DIV_CTL_GATE_REF_BUF	BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL		BIT(31)

#define CCU_DIV_LOCK_CHECK_RETRIES	50

#define CCU_DIV_CLKDIV_MIN		0
#define CCU_DIV_CLKDIV_MAX(_mask) \
	((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)

/*
 * Use the next two helpers until generic field setter and getter with
 * non-constant mask support are available.
 */
static inline u32 ccu_div_get(u32 mask, u32 val)
{
	return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
}

static inline u32 ccu_div_prep(u32 mask, u32 val)
{
	return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
}

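/*
 * Estimate the delay between the CLKDIV lock-bit polls as roughly four
 * periods of the divided clock (4 * div / ref_clk seconds), converted
 * to nanoseconds.
 */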
static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
						  unsigned long div)
{
	u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;

	do_div(ns, ref_clk);

	return ns;
}

static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	return ref_clk / (div ?: 1);
}

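/*
 * Latch the new divider value by setting the SET_CLKDIV bit, then poll
 * the lock bit (its position depends on the CCU_DIV_LOCK_SHIFTED
 * feature) until the divider output is stable or the retries limit is
 * exhausted.
 */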
static int ccu_div_var_update_clkdiv(struct ccu_div *div,
				     unsigned long parent_rate,
				     unsigned long divider)
{
	unsigned long nd;
	u32 val = 0;
	u32 lock;
	int count;

	nd = ccu_div_lock_delay_ns(parent_rate, divider);

	if (div->features & CCU_DIV_LOCK_SHIFTED)
		lock = CCU_DIV_CTL_LOCK_SHIFTED;
	else
		lock = CCU_DIV_CTL_LOCK_NORMAL;

	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);

	/*
	 * Until an nsec-version of readl_poll_timeout() is available, we
	 * have to implement the polling loop below.
	 */
	count = CCU_DIV_LOCK_CHECK_RETRIES;
	do {
		ndelay(nd);
		regmap_read(div->sys_regs, div->reg_ctl, &val);
		if (val & lock)
			return 0;
	} while (--count);

	return -ETIMEDOUT;
}

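/*
 * Enabling a variable divider requires its output to be locked first,
 * so re-trigger the CLKDIV update with the current divider value before
 * setting the EN bit.
 */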
static int ccu_div_var_enable(struct clk_hw *hw)
{
	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;
	u32 val = 0;
	int ret;

	if (!parent_hw) {
		pr_err("Can't enable '%s' with no parent\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	if (val & CCU_DIV_CTL_EN)
		return 0;

	spin_lock_irqsave(&div->lock, flags);
	ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
					ccu_div_get(div->mask, val));
	if (!ret)
		regmap_update_bits(div->sys_regs, div->reg_ctl,
				   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

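/* The gateable dividers are simply controlled via the CTL EN bit. */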
static int ccu_div_gate_enable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static void ccu_div_gate_disable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
	spin_unlock_irqrestore(&div->lock, flags);
}

static int ccu_div_gate_is_enabled(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);

	return !!(val & CCU_DIV_CTL_EN);
}

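/*
 * The reference buffer gate bit is active-low with respect to the clock
 * state: setting CCU_DIV_CTL_GATE_REF_BUF gates the buffered output,
 * while clearing it enables the output again.
 */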
static int ccu_div_buf_enable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_GATE_REF_BUF, 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static void ccu_div_buf_disable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
	spin_unlock_irqrestore(&div->lock, flags);
}

static int ccu_div_buf_is_enabled(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);

	return !(val & CCU_DIV_CTL_GATE_REF_BUF);
}

static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	divider = ccu_div_get(div->mask, val);

	return ccu_div_calc_freq(parent_rate, divider);
}

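/*
 * The optimal divider is just the integer ratio of the parent and the
 * requested rates, clamped to the limits of the CLKDIV field.
 */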
static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
						     unsigned long parent_rate,
						     unsigned int mask)
{
	unsigned long divider;

	divider = parent_rate / rate;
	return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
		       CCU_DIV_CLKDIV_MAX(mask));
}

static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;

	divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);

	return ccu_div_calc_freq(*parent_rate, divider);
}

/*
 * This method is used for the clock divider blocks which support
 * on-the-fly rate changes. Since they lack the EN bit functionality,
 * they can't be gated before the rate adjustment.
 */
static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;
	int ret;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
		divider = 0;
	} else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
		if (divider == 1 || divider == 2)
			divider = 0;
		else if (divider == 3)
			divider = 4;
	}

	val = ccu_div_prep(div->mask, divider);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
	ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

/*
 * This method is used for the clock divider blocks which don't support
 * the on-the-fly rate change.
 */
static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	val = ccu_div_prep(div->mask, divider);

	/*
	 * Also disable the clock divider block if it was enabled by default
	 * or by the bootloader.
	 */
	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   div->mask | CCU_DIV_CTL_EN, val);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

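/*
 * The fixed dividers always have a constant ratio, so the rate-related
 * methods below just recalculate the output frequency, while the
 * set_rate callback is a no-op.
 */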
static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(parent_rate, div->divider);
}

static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(*parent_rate, div->divider);
}

static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}

#ifdef CONFIG_DEBUG_FS

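/*
 * A small descriptor is used to expose the individual CTL register bits
 * as boolean debugfs files.
 */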
struct ccu_div_dbgfs_bit {
	struct ccu_div *div;
	const char *name;
	u32 mask;
};

#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) {	\
	.name = _name,				\
	.mask = _mask				\
}

static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
	CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};

#define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)

/*
 * It can be dangerous to change the Divider settings behind the clock
 * framework's back, therefore we don't provide any kernel-config-based
 * compile-time option to enable this feature.
 */
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS

static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *div = priv;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(div->mask));
	data = ccu_div_prep(div->mask, val);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

#define ccu_div_dbgfs_mode		0644

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

#define ccu_div_dbgfs_bit_set		NULL
#define ccu_div_dbgfs_var_clkdiv_set	NULL
#define ccu_div_dbgfs_mode		0444

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = !!(data & bit->mask);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
			 ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");

static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = ccu_div_get(div->mask, data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
			 ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");

static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;

	*val = div->divider;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
			 ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");

static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bits;
	int didx, bidx, num = 2;
	const char *name;

	num += !!(div->flags & CLK_SET_RATE_GATE) +
	       !!(div->features & CCU_DIV_RESET_DOMAIN);

	bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
		name = ccu_div_bits[bidx].name;
		if (!(div->flags & CLK_SET_RATE_GATE) &&
		    !strcmp("div_en", name)) {
			continue;
		}

		if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
		    !strcmp("div_rst", name)) {
			continue;
		}

		if (!strcmp("div_buf", name))
			continue;

		bits[didx] = ccu_div_bits[bidx];
		bits[didx].div = div;

		if (div->features & CCU_DIV_LOCK_SHIFTED &&
		    !strcmp("div_lock", name)) {
			bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
		}

		debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
					   dentry, &bits[didx],
					   &ccu_div_dbgfs_bit_fops);
		++didx;
	}

	debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
				   div, &ccu_div_dbgfs_var_clkdiv_fops);
}

static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bit;

	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
	if (!bit)
		return;

	*bit = ccu_div_bits[0];
	bit->div = div;
	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
				   &ccu_div_dbgfs_bit_fops);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bit;

	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
	if (!bit)
		return;

	*bit = ccu_div_bits[3];
	bit->div = div;
	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
				   &ccu_div_dbgfs_bit_fops);
}

static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

#else /* !CONFIG_DEBUG_FS */

#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL

#endif /* !CONFIG_DEBUG_FS */

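/*
 * Per the registration procedure below: the variable dividers use
 * either the gate-to-set or the no-gate ops (depending on
 * CLK_SET_RATE_GATE), while the gate, buffer and fixed dividers each
 * have their own ops.
 */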
static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};

static const struct clk_ops ccu_div_buf_ops = {
	.enable = ccu_div_buf_enable,
	.disable = ccu_div_buf_disable,
	.is_enabled = ccu_div_buf_is_enabled,
	.debug_init = ccu_div_buf_debug_init
};

static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};

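/*
 * ccu_div_hw_register() - create and register a CCU divider clock
 * @div_init: divider initialization data (type, registers, parent, etc.)
 *
 * Returns a ccu_div descriptor on success or an ERR_PTR()-encoded error
 * code on failure.
 */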
struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
{
	struct clk_parent_data parent_data = { };
	struct clk_init_data hw_init = { };
	struct ccu_div *div;
	int ret;

	if (!div_init)
		return ERR_PTR(-EINVAL);

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	/*
	 * Note since the Baikal-T1 System Controller registers are
	 * MMIO-backed, we won't check the return status of the regmap IO
	 * operations, because it must be zero anyway.
	 */
	div->hw.init = &hw_init;
	div->id = div_init->id;
	div->reg_ctl = div_init->base + CCU_DIV_CTL;
	div->sys_regs = div_init->sys_regs;
	div->flags = div_init->flags;
	div->features = div_init->features;
	spin_lock_init(&div->lock);

	hw_init.name = div_init->name;
	hw_init.flags = div_init->flags;

	if (div_init->type == CCU_DIV_VAR) {
		if (hw_init.flags & CLK_SET_RATE_GATE)
			hw_init.ops = &ccu_div_var_gate_to_set_ops;
		else
			hw_init.ops = &ccu_div_var_nogate_ops;
		div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
	} else if (div_init->type == CCU_DIV_GATE) {
		hw_init.ops = &ccu_div_gate_ops;
		div->divider = div_init->divider;
	} else if (div_init->type == CCU_DIV_BUF) {
		hw_init.ops = &ccu_div_buf_ops;
	} else if (div_init->type == CCU_DIV_FIXED) {
		hw_init.ops = &ccu_div_fixed_ops;
		div->divider = div_init->divider;
	} else {
		ret = -EINVAL;
		goto err_free_div;
	}

	if (!div_init->parent_name) {
		ret = -EINVAL;
		goto err_free_div;
	}
	parent_data.fw_name = div_init->parent_name;
	parent_data.name = div_init->parent_name;
	hw_init.parent_data = &parent_data;
	hw_init.num_parents = 1;

	ret = of_clk_hw_register(div_init->np, &div->hw);
	if (ret)
		goto err_free_div;

	return div;

err_free_div:
	kfree(div);

	return ERR_PTR(ret);
}
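
/*
 * A minimal usage sketch (illustrative only, the field values below are
 * made up): a CCU driver fills in the init data and registers the
 * divider like this:
 *
 *	static const struct ccu_div_init_data init = {
 *		.id = 1,
 *		.name = "sys_apb_clk",
 *		.parent_name = "pll_clk",
 *		.np = np,
 *		.type = CCU_DIV_VAR,
 *		.sys_regs = sys_regs,
 *		.base = 0x060,
 *		.width = 4,
 *		.flags = CLK_SET_RATE_GATE,
 *		.features = CCU_DIV_RESET_DOMAIN,
 *	};
 *	struct ccu_div *div = ccu_div_hw_register(&init);
 *
 *	if (IS_ERR(div))
 *		return PTR_ERR(div);
 */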

void ccu_div_hw_unregister(struct ccu_div *div)
{
	clk_hw_unregister(&div->hw);

	kfree(div);
}