// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_dmem_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2012 Damian Hobson-Garcia
 *
 * Based on uio_pdrv_genirq.c by Magnus Damm
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_dmem_genirq"
#define DMEM_MAP_ERROR (~0)

struct uio_dmem_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
	unsigned int dmem_region_start;
	unsigned int num_dmem_regions;
	struct mutex alloc_lock;
	unsigned int refcnt;
};

/* Bits in uio_dmem_genirq_platdata.flags */
enum {
	UIO_IRQ_DISABLED = 0,
};
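
/*
 * Illustrative only, not part of the driver: a board file could hand
 * dynamic regions to this driver through struct uio_dmem_genirq_pdata
 * (see <linux/platform_data/uio_dmem_genirq.h>). The device name, IRQ
 * number and region sizes below are made-up example values.
 *
 *	static unsigned int dmem_sizes[] = { SZ_1M, SZ_64K };
 *
 *	static struct uio_dmem_genirq_pdata mydev_pdata = {
 *		.uioinfo = {
 *			.name = "mydev",
 *			.version = "0",
 *			.irq = 42,
 *		},
 *		.dynamic_region_sizes = dmem_sizes,
 *		.num_dynamic_regions = ARRAY_SIZE(dmem_sizes),
 *	};
 *
 *	platform_device_register_data(NULL, DRIVER_NAME, -1, &mydev_pdata,
 *				      sizeof(mydev_pdata));
 */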

static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		void *addr;

		if (!uiomem->size)
			break;

		addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
					  &uiomem->dma_addr, GFP_KERNEL);
		uiomem->addr = addr ? (uintptr_t) addr : DMEM_MAP_ERROR;
		++uiomem;
	}
	priv->refcnt++;

	mutex_unlock(&priv->alloc_lock);
	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}
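
/*
 * Sketch of the user-space view (illustrative; the device node and map
 * index are examples): the UIO core selects map N via an mmap() offset
 * of N * page size, so a dynamic region allocated above can be mapped
 * roughly as
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, N * getpagesize());
 *
 * The region only exists while the device is open; before open() (and
 * after the last release()) its addr holds DMEM_MAP_ERROR.
 */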

static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);

	priv->refcnt--;
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		if (!uiomem->size)
			break;
		/* Skip regions whose allocation failed at open() */
		if (uiomem->addr != DMEM_MAP_ERROR) {
			dma_free_coherent(uiomem->dma_device, uiomem->size,
					  (void *) (uintptr_t) uiomem->addr,
					  uiomem->dma_addr);
		}
		uiomem->addr = DMEM_MAP_ERROR;
		++uiomem;
	}

	mutex_unlock(&priv->alloc_lock);
	return 0;
}

static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	spin_lock(&priv->lock);
	if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
		disable_irq_nosync(irq);
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks and
	 * concurrency with the irq handler on SMP systems.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
			disable_irq_nosync(dev_info->irq);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
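
/*
 * User-space counterpart, for illustration: the UIO core turns a
 * blocking read() of a 32-bit count into "wait for the next interrupt",
 * and a write() of a 32-bit value into a call to the irqcontrol hook
 * above (1 = enable, 0 = disable):
 *
 *	uint32_t count, enable = 1;
 *
 *	read(fd, &count, sizeof(count));	// blocks until an IRQ fires
 *	// ... acknowledge the interrupt cause in the device here ...
 *	write(fd, &enable, sizeof(enable));	// re-enable via irqcontrol
 */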

static void uio_dmem_genirq_pm_disable(void *data)
{
	struct device *dev = data;

	pm_runtime_disable(dev);
}

static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
	struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct uio_info *uioinfo = &pdata->uioinfo;
	struct uio_dmem_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		/* alloc uioinfo for one device */
		uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), GFP_KERNEL);
		if (!uioinfo) {
			dev_err(&pdev->dev, "unable to kmalloc\n");
			return -ENOMEM;
		}
		uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
					       pdev->dev.of_node);
		uioinfo->version = "devicetree";
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return -EINVAL;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "unable to kmalloc\n");
		return -ENOMEM;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return ret;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;
	mutex_init(&priv->alloc_lock);

	if (!uioinfo->irq) {
		/* Multiple IRQs are not supported */
		ret = platform_get_irq(pdev, 0);
		if (ret == -ENXIO && pdev->dev.of_node)
			ret = UIO_IRQ_NONE;
		else if (ret < 0)
			return ret;
		uioinfo->irq = ret;
	}

	if (uioinfo->irq) {
		struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

		/*
		 * If this is a level interrupt, don't do lazy disable.
		 * Otherwise the irq will fire again immediately, since
		 * clearing the actual cause, at device level, is done in
		 * user space. irqd_is_level_type() isn't used because it
		 * isn't valid until the irq is configured.
		 */
		if (irq_data &&
		    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
			dev_dbg(&pdev->dev, "disable lazy unmask\n");
			irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
		}
	}

	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		++uiomem;
	}

	priv->dmem_region_start = uiomem - &uioinfo->mem[0];
	priv->num_dmem_regions = pdata->num_dynamic_regions;

	for (i = 0; i < pdata->num_dynamic_regions; ++i) {
		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" dynamic and fixed memory regions.\n");
			break;
		}
		uiomem->memtype = UIO_MEM_DMA_COHERENT;
		uiomem->dma_device = &pdev->dev;
		uiomem->addr = DMEM_MAP_ERROR;
		uiomem->size = pdata->dynamic_region_sizes[i];
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_dmem_genirq_handler;
	uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
	uioinfo->open = uio_dmem_genirq_open;
	uioinfo->release = uio_dmem_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = devm_add_action_or_reset(&pdev->dev, uio_dmem_genirq_pm_disable, &pdev->dev);
	if (ret)
		return ret;

	return devm_uio_register_device(&pdev->dev, priv->uioinfo);
}

static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, i.e. before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
	.runtime_suspend = uio_dmem_genirq_runtime_nop,
	.runtime_resume = uio_dmem_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
	{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif

static struct platform_driver uio_dmem_genirq = {
	.probe = uio_dmem_genirq_probe,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_dmem_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_dmem_genirq);

MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);