// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Atom platform clocks driver for BayTrail and CherryTrail SoCs
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Irina Tirdea <irina.tirdea@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define PLT_CLK_NAME_BASE	"pmc_plt_clk"

struct clk_plt_fixed {
	struct clk_hw *clk;
	struct clk_lookup *lookup;
};

struct clk_plt {
	struct clk_hw hw;
	void __iomem *reg;
	struct clk_lookup *lookup;
	/* protect access to PMC registers */
	spinlock_t lock;
};

#define to_clk_plt(_hw) container_of(_hw, struct clk_plt, hw)

struct clk_plt_data {
	struct clk_plt_fixed **parents;
	u8 nparents;
	struct clk_plt *clks[PMC_CLK_NUM];
	struct clk_lookup *mclk_lookup;
	struct clk_lookup *ether_clk_lookup;
};

/* Convert the register's frequency-select field to a parent table index */
static inline int plt_reg_to_parent(int reg)
{
	switch (reg & PMC_MASK_CLK_FREQ) {
	default:
	case PMC_CLK_FREQ_XTAL:
		return 0;
	case PMC_CLK_FREQ_PLL:
		return 1;
	}
}

/* Convert a parent index to the register's frequency-select value */
static inline int plt_parent_to_reg(int index)
{
	switch (index) {
	default:
	case 0:
		return PMC_CLK_FREQ_XTAL;
	case 1:
		return PMC_CLK_FREQ_PLL;
	}
}

/* Abstract the clock-control field into a simple enabled/disabled value */
static inline int plt_reg_to_enabled(int reg)
{
	switch (reg & PMC_MASK_CLK_CTL) {
	case PMC_CLK_CTL_GATED_ON_D3:
	case PMC_CLK_CTL_FORCE_ON:
		return 1;	/* enabled */
	case PMC_CLK_CTL_FORCE_OFF:
	case PMC_CLK_CTL_RESERVED:
	default:
		return 0;	/* disabled */
	}
}

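/* Read-modify-write the clock's PMC control register under its spinlock */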
static void plt_clk_reg_update(struct clk_plt *clk, u32 mask, u32 val)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(&clk->lock, flags);

	tmp = readl(clk->reg);
	tmp = (tmp & ~mask) | (val & mask);
	writel(tmp, clk->reg);

	spin_unlock_irqrestore(&clk->lock, flags);
}

static int plt_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_FREQ, plt_parent_to_reg(index));

	return 0;
}

static u8 plt_clk_get_parent(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);
	u32 value;

	value = readl(clk->reg);

	return plt_reg_to_parent(value);
}

static int plt_clk_enable(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_ON);

	return 0;
}

static void plt_clk_disable(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_OFF);
}

static int plt_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);
	u32 value;

	value = readl(clk->reg);

	return plt_reg_to_enabled(value);
}

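/*
 * Each platform clock is a gate (FORCE_ON/FORCE_OFF) combined with a mux
 * selecting between the XTAL and PLL parents.
 */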
static const struct clk_ops plt_clk_ops = {
	.enable = plt_clk_enable,
	.disable = plt_clk_disable,
	.is_enabled = plt_clk_is_enabled,
	.get_parent = plt_clk_get_parent,
	.set_parent = plt_clk_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};

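/*
 * Register one "pmc_plt_clk_<id>" mux/gate clock and a clkdev lookup for it.
 * The control register is located at the PMC base plus PMC_CLK_CTL_OFFSET and
 * the per-clock stride.
 */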
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
					const struct pmc_clk_data *pmc_data,
					const char **parent_names,
					int num_parents)
{
	struct clk_plt *pclk;
	struct clk_init_data init;
	int ret;

	pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
	if (!pclk)
		return ERR_PTR(-ENOMEM);

	init.name = kasprintf(GFP_KERNEL, "%s_%d", PLT_CLK_NAME_BASE, id);
	init.ops = &plt_clk_ops;
	init.flags = 0;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	pclk->hw.init = &init;
	pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
	spin_lock_init(&pclk->lock);

	/*
	 * On some systems, the pmc_plt_clocks already enabled by the
	 * firmware are being marked as critical to avoid them being
	 * gated by the clock framework.
	 */
	if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
		init.flags |= CLK_IS_CRITICAL;

	ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
	if (ret) {
		pclk = ERR_PTR(ret);
		goto err_free_init;
	}

	pclk->lookup = clkdev_hw_create(&pclk->hw, init.name, NULL);
	if (!pclk->lookup) {
		pclk = ERR_PTR(-ENOMEM);
		goto err_free_init;
	}

	/*
	 * The clk core and clkdev duplicate the name, so the kasprintf()ed
	 * init.name is freed on both the success and error paths.
	 */
err_free_init:
	kfree(init.name);
	return pclk;
}

static void plt_clk_unregister(struct clk_plt *pclk)
{
	clkdev_drop(pclk->lookup);
}

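/* Register one fixed-rate parent clock and its clkdev lookup */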
static struct clk_plt_fixed *plt_clk_register_fixed_rate(struct platform_device *pdev,
							  const char *name,
							  const char *parent_name,
							  unsigned long fixed_rate)
{
	struct clk_plt_fixed *pclk;

	pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
	if (!pclk)
		return ERR_PTR(-ENOMEM);

	pclk->clk = clk_hw_register_fixed_rate(&pdev->dev, name, parent_name,
					       0, fixed_rate);
	if (IS_ERR(pclk->clk))
		return ERR_CAST(pclk->clk);

	pclk->lookup = clkdev_hw_create(pclk->clk, name, NULL);
	if (!pclk->lookup) {
		clk_hw_unregister_fixed_rate(pclk->clk);
		return ERR_PTR(-ENOMEM);
	}

	return pclk;
}

static void plt_clk_unregister_fixed_rate(struct clk_plt_fixed *pclk)
{
	clkdev_drop(pclk->lookup);
	clk_hw_unregister_fixed_rate(pclk->clk);
}

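/* The helpers below tear down the first 'i' entries (error and remove paths) */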
static void plt_clk_unregister_fixed_rate_loop(struct clk_plt_data *data,
					       unsigned int i)
{
	while (i--)
		plt_clk_unregister_fixed_rate(data->parents[i]);
}

static void plt_clk_free_parent_names_loop(const char **parent_names,
					   unsigned int i)
{
	while (i--)
		kfree_const(parent_names[i]);
	kfree(parent_names);
}

static void plt_clk_unregister_loop(struct clk_plt_data *data,
				    unsigned int i)
{
	while (i--)
		plt_clk_unregister(data->clks[i]);
}

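/*
 * Register the fixed-rate parent clocks described by the PMC platform data
 * and return their names for use as mux parents. The caller releases the
 * returned array with plt_clk_free_parent_names_loop().
 */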
static const char **plt_clk_register_parents(struct platform_device *pdev,
					     struct clk_plt_data *data,
					     const struct pmc_clk *clks)
{
	const char **parent_names;
	unsigned int i;
	int err;
	int nparents = 0;

	data->nparents = 0;
	while (clks[nparents].name)
		nparents++;

	data->parents = devm_kcalloc(&pdev->dev, nparents,
				     sizeof(*data->parents), GFP_KERNEL);
	if (!data->parents)
		return ERR_PTR(-ENOMEM);

	parent_names = kcalloc(nparents, sizeof(*parent_names),
			       GFP_KERNEL);
	if (!parent_names)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nparents; i++) {
		data->parents[i] =
			plt_clk_register_fixed_rate(pdev, clks[i].name,
						    clks[i].parent_name,
						    clks[i].freq);
		if (IS_ERR(data->parents[i])) {
			err = PTR_ERR(data->parents[i]);
			goto err_unreg;
		}
		parent_names[i] = kstrdup_const(clks[i].name, GFP_KERNEL);
	}

	data->nparents = nparents;
	return parent_names;

err_unreg:
	plt_clk_unregister_fixed_rate_loop(data, i);
	plt_clk_free_parent_names_loop(parent_names, i);
	return ERR_PTR(err);
}

static void plt_clk_unregister_parents(struct clk_plt_data *data)
{
	plt_clk_unregister_fixed_rate_loop(data, data->nparents);
}

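/*
 * Probe: register the fixed-rate parents, the PMC_CLK_NUM platform clocks
 * and the "mclk"/"ether_clk" consumer lookups for clocks 3 and 4.
 */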
static int plt_clk_probe(struct platform_device *pdev)
{
	const struct pmc_clk_data *pmc_data;
	const char **parent_names;
	struct clk_plt_data *data;
	unsigned int i;
	int err;

	pmc_data = dev_get_platdata(&pdev->dev);
	if (!pmc_data || !pmc_data->clks)
		return -EINVAL;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	parent_names = plt_clk_register_parents(pdev, data, pmc_data->clks);
	if (IS_ERR(parent_names))
		return PTR_ERR(parent_names);

	for (i = 0; i < PMC_CLK_NUM; i++) {
		data->clks[i] = plt_clk_register(pdev, i, pmc_data,
						 parent_names, data->nparents);
		if (IS_ERR(data->clks[i])) {
			err = PTR_ERR(data->clks[i]);
			goto err_unreg_clk_plt;
		}
	}

	/* Expose well-known consumer names for pmc_plt_clk_3 and pmc_plt_clk_4 */
	data->mclk_lookup = clkdev_hw_create(&data->clks[3]->hw, "mclk", NULL);
	if (!data->mclk_lookup) {
		err = -ENOMEM;
		goto err_unreg_clk_plt;
	}

	data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
						  "ether_clk", NULL);
	if (!data->ether_clk_lookup) {
		err = -ENOMEM;
		goto err_drop_mclk;
	}

	plt_clk_free_parent_names_loop(parent_names, data->nparents);

	platform_set_drvdata(pdev, data);
	return 0;

err_drop_mclk:
	clkdev_drop(data->mclk_lookup);
err_unreg_clk_plt:
	plt_clk_unregister_loop(data, i);
	plt_clk_unregister_parents(data);
	plt_clk_free_parent_names_loop(parent_names, data->nparents);
	return err;
}

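/* Tear down in reverse order of probe; devm unregisters the platform clk_hws */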
static void plt_clk_remove(struct platform_device *pdev)
{
	struct clk_plt_data *data;

	data = platform_get_drvdata(pdev);

	clkdev_drop(data->ether_clk_lookup);
	clkdev_drop(data->mclk_lookup);
	plt_clk_unregister_loop(data, PMC_CLK_NUM);
	plt_clk_unregister_parents(data);
}

static struct platform_driver plt_clk_driver = {
	.driver = {
		.name = "clk-pmc-atom",
	},
	.probe = plt_clk_probe,
	.remove_new = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);


Source: linux/drivers/clk/x86/clk-pmc-atom.c