// SPDX-License-Identifier: GPL-2.0
/*
 * cdev.c - Character device component for Mostcore
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/most.h>

#define CHRDEV_REGION_SIZE 50

static struct cdev_component {
	dev_t devno;
	struct ida minor_id;
	unsigned int major;
	struct class *class;
	struct most_component cc;
} comp;

struct comp_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;	/* synchronization lock to unlink channels */
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct comp_channel, cdev)
static LIST_HEAD(channel_list);
static DEFINE_SPINLOCK(ch_list_lock);
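
/*
 * All channels are kept on the global channel_list, protected by
 * ch_list_lock. The per-channel 'unlink' spinlock serializes
 * comp_disconnect_channel() and comp_close() against comp_rx_completion(),
 * so no new MBOs are queued once c->dev or c->access_ref has been cleared.
 */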

static inline bool ch_has_mbo(struct comp_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}

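/*
 * ch_get_mbo() reuses the MBO at the head of the fifo (e.g. one that a
 * previous short write only partially filled); only when the fifo is empty
 * does it fetch a fresh MBO from the core and park it in the fifo until it
 * is submitted.
 */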
static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}

static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
	struct comp_channel *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			spin_unlock_irqrestore(&ch_list_lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	return NULL;
}

static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}

static void destroy_cdev(struct comp_channel *c)
{
	unsigned long flags;

	device_destroy(comp.class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}

static void destroy_channel(struct comp_channel *c)
{
	ida_simple_remove(&comp.minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}

/**
 * comp_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 */
static int comp_open(struct inode *inode, struct file *filp)
{
	struct comp_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}

/**
 * comp_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 */
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

/**
 * comp_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 */
static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}
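
/*
 * Note on comp_write(): for streaming channels a short write only advances
 * c->mbo_offs; the MBO is submitted to the core once the buffer is full,
 * whereas control and async channels submit after every write.
 */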

/**
 * comp_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 */
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* make sure we don't submit to gone devices */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}
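
/*
 * Note on comp_read(): one MBO may be drained by several read() calls;
 * c->mbo_offs tracks the position within mbo->processed_length, and the
 * MBO is only dropped from the fifo and returned to the core once it has
 * been consumed completely.
 */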

static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
	struct comp_channel *c = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &c->wq, wait);

	mutex_lock(&c->io_mutex);
	if (c->cfg->direction == MOST_CH_RX) {
		if (!c->dev || !kfifo_is_empty(&c->fifo))
			mask |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&c->io_mutex);
	return mask;
}

/*
 * Initialization of struct file_operations
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = comp_read,
	.write = comp_write,
	.open = comp_open,
	.release = comp_close,
	.poll = comp_poll,
};
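
/*
 * Illustrative user-space access pattern (a sketch, not part of this
 * module). The node name below is an assumption; in practice it is the
 * name handed to comp_probe() when the channel is linked via configfs:
 *
 *	int fd = open("/dev/rx_ch0", O_RDONLY);	// RX channels must be opened read-only
 *	char buf[2048];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// blocks until an MBO completes,
 *						// or returns -EAGAIN with O_NONBLOCK
 *	close(fd);				// stops the channel in the core
 */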

/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 */
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
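
/*
 * Note on teardown: if user space still holds the file open (access_ref
 * set), comp_disconnect_channel() only removes the cdev and stops the
 * channel; the comp_channel object itself is freed later by comp_close()
 * once it sees that c->dev has been cleared.
 */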

/**
 * comp_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO and stores it in the local
 * fifo buffer.
 */
static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -EINVAL;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * comp_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes sleeping processes in the wait-queue.
 */
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		dev_warn(c->dev, "Channel ID out of range\n");
		return -EINVAL;
	}

	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * comp_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @name: name of the device to be created
 * @args: pointer to array of component parameters (from configfs)
 *
 * This allocates a channel object and creates the device node in /dev
 *
 * Returns 0 on success or error code otherwise.
 */
static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if (!cfg || !name)
		return -EINVAL;

	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev_and_free_channel;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo_and_del_list;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

err_free_kfifo_and_del_list:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev_and_free_channel:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_simple_remove(&comp.minor_id, current_minor);
	return retval;
}
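
/*
 * Note on comp_probe(): the kfifo is allocated with cfg->num_buffers
 * entries (kfifo_alloc() rounds this up to a power of two), so every MBO
 * the core can hand out for this channel fits into the fifo; the error
 * labels unwind the allocations in reverse order of acquisition.
 */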

static struct cdev_component comp = {
	.cc = {
		.mod = THIS_MODULE,
		.name = "cdev",
		.probe_channel = comp_probe,
		.disconnect_channel = comp_disconnect_channel,
		.rx_completion = comp_rx_completion,
		.tx_completion = comp_tx_completion,
	},
};

static int __init most_cdev_init(void)
{
	int err;

	comp.class = class_create("most_cdev");
	if (IS_ERR(comp.class))
		return PTR_ERR(comp.class);

	ida_init(&comp.minor_id);

	err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	comp.major = MAJOR(comp.devno);
	err = most_register_component(&comp.cc);
	if (err)
		goto free_cdev;
	err = most_register_configfs_subsys(&comp.cc);
	if (err)
		goto deregister_comp;
	return 0;

deregister_comp:
	most_deregister_component(&comp.cc);
free_cdev:
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
	return err;
}
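
/*
 * Note on module teardown: most_cdev_exit() below deregisters from the
 * core first, destroys any channels still on channel_list, and then
 * releases the chrdev region, the minor-number IDA and the device class,
 * mirroring the error unwind in most_cdev_init().
 */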

static void __exit most_cdev_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_configfs_subsys(&comp.cc);
	most_deregister_component(&comp.cc);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
}

module_init(most_cdev_init);
module_exit(most_cdev_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");
