// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
 *
 * Copyright (C) 2018 Jacob Feder
 *
 * Authors: Jacob Feder <jacobsfeder@gmail.com>
 *
 * See Xilinx PG080 document for IP details
 */

/* ----------------------------
 * includes
 * ----------------------------
 */

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/param.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/miscdevice.h>

/* ----------------------------
 * driver parameters
 * ----------------------------
 */

#define DRIVER_NAME "axis_fifo"

#define READ_BUF_SIZE 128U /* read buffer length in words */
#define WRITE_BUF_SIZE 128U /* write buffer length in words */

/* ----------------------------
 * IP register offsets
 * ----------------------------
 */

#define XLLF_ISR_OFFSET  0x00000000 /* Interrupt Status */
#define XLLF_IER_OFFSET  0x00000004 /* Interrupt Enable */

#define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */
#define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */
#define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */
#define XLLF_TLR_OFFSET  0x00000014 /* Transmit Length */

#define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */
#define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */
#define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */
#define XLLF_RLR_OFFSET  0x00000024 /* Receive Length */
#define XLLF_SRR_OFFSET  0x00000028 /* Local Link Reset */
#define XLLF_TDR_OFFSET  0x0000002C /* Transmit Destination */
#define XLLF_RDR_OFFSET  0x00000030 /* Receive Destination */

/* ----------------------------
 * reset register masks
 * ----------------------------
 */

#define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */
#define XLLF_TDFR_RESET_MASK 0x000000a5 /* transmit reset value */
#define XLLF_SRR_RESET_MASK  0x000000a5 /* local link reset value */

/* ----------------------------
 * interrupt masks
 * ----------------------------
 */

#define XLLF_INT_RPURE_MASK   0x80000000 /* Receive under-read */
#define XLLF_INT_RPORE_MASK   0x40000000 /* Receive over-read */
#define XLLF_INT_RPUE_MASK    0x20000000 /* Receive underrun (empty) */
#define XLLF_INT_TPOE_MASK    0x10000000 /* Transmit overrun */
#define XLLF_INT_TC_MASK      0x08000000 /* Transmit complete */
#define XLLF_INT_RC_MASK      0x04000000 /* Receive complete */
#define XLLF_INT_TSE_MASK     0x02000000 /* Transmit length mismatch */
#define XLLF_INT_TRC_MASK     0x01000000 /* Transmit reset complete */
#define XLLF_INT_RRC_MASK     0x00800000 /* Receive reset complete */
#define XLLF_INT_TFPF_MASK    0x00400000 /* Tx FIFO Programmable Full */
#define XLLF_INT_TFPE_MASK    0x00200000 /* Tx FIFO Programmable Empty */
#define XLLF_INT_RFPF_MASK    0x00100000 /* Rx FIFO Programmable Full */
#define XLLF_INT_RFPE_MASK    0x00080000 /* Rx FIFO Programmable Empty */
#define XLLF_INT_ALL_MASK     0xfff80000 /* All the ints */
#define XLLF_INT_ERROR_MASK   0xf2000000 /* Error status ints */
#define XLLF_INT_RXERROR_MASK 0xe0000000 /* Receive Error status ints */
#define XLLF_INT_TXERROR_MASK 0x12000000 /* Transmit Error status ints */

/* ----------------------------
 * globals
 * ----------------------------
 */
static long read_timeout = 1000; /* ms to wait before read() times out */
static long write_timeout = 1000; /* ms to wait before write() times out */

/* ----------------------------
 * module command-line arguments
 * ----------------------------
 */

module_param(read_timeout, long, 0444);
MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout");
module_param(write_timeout, long, 0444);
MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout");
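/*
 * Both timeouts can be set at module load time. A sketch of the intended
 * usage (the exact module name depends on how the kernel is built; the
 * values below are only illustrative):
 *
 *	modprobe axis_fifo read_timeout=500 write_timeout=-1
 *
 * A negative value disables the timeout; axis_fifo_init() converts the
 * millisecond values to jiffies or MAX_SCHEDULE_TIMEOUT accordingly.
 */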

/* ----------------------------
 * types
 * ----------------------------
 */

struct axis_fifo {
	int irq; /* interrupt */
	void __iomem *base_addr; /* kernel space memory */

	unsigned int rx_fifo_depth; /* max words in the receive fifo */
	unsigned int tx_fifo_depth; /* max words in the transmit fifo */
	int has_rx_fifo; /* whether the IP has the rx fifo enabled */
	int has_tx_fifo; /* whether the IP has the tx fifo enabled */

	wait_queue_head_t read_queue; /* wait queue for asynchronous read */
	struct mutex read_lock; /* lock for reading */
	wait_queue_head_t write_queue; /* wait queue for asynchronous write */
	struct mutex write_lock; /* lock for writing */
	unsigned int write_flags; /* write file flags */
	unsigned int read_flags; /* read file flags */

	struct device *dt_device; /* device created from the device tree */
	struct miscdevice miscdev;
};

/* ----------------------------
 * sysfs entries
 * ----------------------------
 */

static ssize_t sysfs_write(struct device *dev, const char *buf,
			   size_t count, unsigned int addr_offset)
{
	struct axis_fifo *fifo = dev_get_drvdata(dev);
	unsigned long tmp;
	int rc;

	rc = kstrtoul(buf, 0, &tmp);
	if (rc < 0)
		return rc;

	iowrite32(tmp, fifo->base_addr + addr_offset);

	return count;
}

static ssize_t sysfs_read(struct device *dev, char *buf,
			  unsigned int addr_offset)
{
	struct axis_fifo *fifo = dev_get_drvdata(dev);
	unsigned int read_val;

	read_val = ioread32(fifo->base_addr + addr_offset);
	return sysfs_emit(buf, "0x%x\n", read_val);
}

static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET);
}

static ssize_t isr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_ISR_OFFSET);
}

static DEVICE_ATTR_RW(isr);

static ssize_t ier_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_IER_OFFSET);
}

static ssize_t ier_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_IER_OFFSET);
}

static DEVICE_ATTR_RW(ier);

static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET);
}

static DEVICE_ATTR_WO(tdfr);

static ssize_t tdfv_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_TDFV_OFFSET);
}

static DEVICE_ATTR_RO(tdfv);

static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET);
}

static DEVICE_ATTR_WO(tdfd);

static ssize_t tlr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET);
}

static DEVICE_ATTR_WO(tlr);

static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET);
}

static DEVICE_ATTR_WO(rdfr);

static ssize_t rdfo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFO_OFFSET);
}

static DEVICE_ATTR_RO(rdfo);

static ssize_t rdfd_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFD_OFFSET);
}

static DEVICE_ATTR_RO(rdfd);

static ssize_t rlr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RLR_OFFSET);
}

static DEVICE_ATTR_RO(rlr);

static ssize_t srr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET);
}

static DEVICE_ATTR_WO(srr);

static ssize_t tdr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET);
}

static DEVICE_ATTR_WO(tdr);

static ssize_t rdr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDR_OFFSET);
}

static DEVICE_ATTR_RO(rdr);

static struct attribute *axis_fifo_attrs[] = {
	&dev_attr_isr.attr,
	&dev_attr_ier.attr,
	&dev_attr_tdfr.attr,
	&dev_attr_tdfv.attr,
	&dev_attr_tdfd.attr,
	&dev_attr_tlr.attr,
	&dev_attr_rdfr.attr,
	&dev_attr_rdfo.attr,
	&dev_attr_rdfd.attr,
	&dev_attr_rlr.attr,
	&dev_attr_srr.attr,
	&dev_attr_tdr.attr,
	&dev_attr_rdr.attr,
	NULL,
};

static const struct attribute_group axis_fifo_attrs_group = {
	.name = "ip_registers",
	.attrs = axis_fifo_attrs,
};

static const struct attribute_group *axis_fifo_attrs_groups[] = {
	&axis_fifo_attrs_group,
	NULL,
};
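
/*
 * These attributes are attached through miscdev->groups, so once
 * misc_register() succeeds they appear in the "ip_registers" group of the
 * misc device's sysfs directory. A sketch of how to poke the raw registers
 * from a shell (the device name is illustrative; it is derived from the
 * mapped base address in axis_fifo_probe()):
 *
 *	cat /sys/class/misc/axis_fifo_0x43c00000/ip_registers/isr
 *	echo 0xffffffff > /sys/class/misc/axis_fifo_0x43c00000/ip_registers/isr
 */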

/* ----------------------------
 * implementation
 * ----------------------------
 */

static void reset_ip_core(struct axis_fifo *fifo)
{
	iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
	iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
	iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
	iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
		  XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
		  XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
		  fifo->base_addr + XLLF_IER_OFFSET);
	iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}

/**
 * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to read to.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to check the device's
 * occupancy before reading the length register and then the data. All these
 * operations must be executed atomically, in order and one after the other
 * without missing any.
 *
 * Returns the number of bytes read from the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
			      size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	size_t bytes_available;
	unsigned int words_available;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[READ_BUF_SIZE];

	if (fifo->read_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if any packet is available.
		 */
		if (!mutex_trylock(&fifo->read_lock))
			return -EAGAIN;

		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode
		 * wait for a packet available interrupt (or timeout)
		 * if nothing is currently available
		 */
		mutex_lock(&fifo->read_lock);
		ret = wait_event_interruptible_timeout(fifo->read_queue,
			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
			read_timeout);

		if (ret <= 0) {
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
	if (!bytes_available) {
		dev_err(fifo->dt_device, "received a packet of length 0\n");
		ret = -EIO;
		goto end_unlock;
	}

	if (bytes_available > len) {
		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n",
			bytes_available, len);
		ret = -EINVAL;
		goto end_unlock;
	}

	if (bytes_available % sizeof(u32)) {
		/* this probably can't happen unless IP
		 * registers were previously mishandled
		 */
		dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n");
		ret = -EIO;
		goto end_unlock;
	}

	words_available = bytes_available / sizeof(u32);

	/* read data into an intermediate buffer, copying the contents
	 * to userspace when the buffer is full
	 */
	copied = 0;
	while (words_available > 0) {
		copy = min(words_available, READ_BUF_SIZE);

		for (i = 0; i < copy; i++) {
			tmp_buf[i] = ioread32(fifo->base_addr +
					      XLLF_RDFD_OFFSET);
		}

		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
				 copy * sizeof(u32))) {
			ret = -EFAULT;
			goto end_unlock;
		}

		copied += copy;
		words_available -= copy;
	}

	ret = bytes_available;

end_unlock:
	mutex_unlock(&fifo->read_lock);

	return ret;
}
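
/*
 * Illustrative userspace read pattern (a sketch, not part of the driver;
 * assumes the usual <fcntl.h>/<unistd.h>/<stdint.h> includes and a
 * hypothetical device node name derived from the base address). Each
 * successful read() returns exactly one AXI-Stream packet, so the buffer
 * must be at least as large as the biggest expected packet:
 *
 *	int fd = open("/dev/axis_fifo_0x43c00000", O_RDONLY);
 *	uint32_t packet[1024];
 *	ssize_t n = read(fd, packet, sizeof(packet));
 *	close(fd);
 *
 * On success, n is the packet length in bytes (n / 4 words); -EAGAIN means
 * the read timed out (or, with O_NONBLOCK, that no packet was pending).
 */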

/**
 * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to write to the device.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to write to the device's
 * data buffer then to the device's packet length register atomically. Also,
 * we need to lock before checking if the device has available space to avoid
 * any concurrency issue.
 *
 * Returns the number of bytes written to the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
			       size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	unsigned int words_to_write;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[WRITE_BUF_SIZE];

	if (len % sizeof(u32)) {
		dev_err(fifo->dt_device,
			"tried to send a packet that isn't word-aligned\n");
		return -EINVAL;
	}

	words_to_write = len / sizeof(u32);

	if (!words_to_write) {
		dev_err(fifo->dt_device,
			"tried to send a packet of length 0\n");
		return -EINVAL;
	}

	if (words_to_write > fifo->tx_fifo_depth) {
		dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
			words_to_write, fifo->tx_fifo_depth);
		return -EINVAL;
	}

	if (fifo->write_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if there is any room to write the given buffer.
		 */
		if (!mutex_trylock(&fifo->write_lock))
			return -EAGAIN;

		if (words_to_write > ioread32(fifo->base_addr +
					      XLLF_TDFV_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode */

		/* wait for an interrupt (or timeout) if there isn't
		 * currently enough room in the fifo
		 */
		mutex_lock(&fifo->write_lock);
		ret = wait_event_interruptible_timeout(fifo->write_queue,
			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
				>= words_to_write,
			write_timeout);

		if (ret <= 0) {
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	/* write data from an intermediate buffer into the fifo IP, refilling
	 * the buffer with userspace data as needed
	 */
	copied = 0;
	while (words_to_write > 0) {
		copy = min(words_to_write, WRITE_BUF_SIZE);

		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
				   copy * sizeof(u32))) {
			ret = -EFAULT;
			goto end_unlock;
		}

		for (i = 0; i < copy; i++)
			iowrite32(tmp_buf[i], fifo->base_addr +
				  XLLF_TDFD_OFFSET);

		copied += copy;
		words_to_write -= copy;
	}

	ret = copied * sizeof(u32);

	/* write packet size to fifo */
	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);

end_unlock:
	mutex_unlock(&fifo->write_lock);

	return ret;
}
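
/*
 * Illustrative userspace write pattern (again a sketch with a hypothetical
 * device node name): each write() must be a whole number of 32-bit words and
 * no larger than the transmit FIFO depth; the driver transmits it as a
 * single AXI-Stream packet.
 *
 *	int fd = open("/dev/axis_fifo_0x43c00000", O_WRONLY);
 *	uint32_t packet[4] = { 0xdeadbeef, 1, 2, 3 };
 *	ssize_t n = write(fd, packet, sizeof(packet));
 *	close(fd);
 */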

static irqreturn_t axis_fifo_irq(int irq, void *dw)
{
	struct axis_fifo *fifo = (struct axis_fifo *)dw;
	unsigned int pending_interrupts;

	do {
		pending_interrupts = ioread32(fifo->base_addr +
					      XLLF_IER_OFFSET) &
				     ioread32(fifo->base_addr
					      + XLLF_ISR_OFFSET);
		if (pending_interrupts & XLLF_INT_RC_MASK) {
			/* packet received */

			/* wake the reader process if it is waiting */
			wake_up(&fifo->read_queue);

			/* clear interrupt */
			iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TC_MASK) {
			/* packet sent */

			/* wake the writer process if it is waiting */
			wake_up(&fifo->write_queue);

			iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
			/* transmit fifo programmable full */

			iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
			/* transmit fifo programmable empty */

			iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
			/* receive fifo programmable full */

			iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
			/* receive fifo programmable empty */

			iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TRC_MASK) {
			/* transmit reset complete interrupt */

			iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RRC_MASK) {
			/* receive reset complete interrupt */

			iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
			/* receive fifo under-read error interrupt */
			dev_err(fifo->dt_device,
				"receive under-read interrupt\n");

			iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
			/* receive over-read error interrupt */
			dev_err(fifo->dt_device,
				"receive over-read interrupt\n");

			iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
			/* receive underrun error interrupt */
			dev_err(fifo->dt_device,
				"receive underrun error interrupt\n");

			iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
			/* transmit overrun error interrupt */
			dev_err(fifo->dt_device,
				"transmit overrun error interrupt\n");

			iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TSE_MASK) {
			/* transmit length mismatch error interrupt */
			dev_err(fifo->dt_device,
				"transmit length mismatch error interrupt\n");

			iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts) {
			/* unknown interrupt type */
			dev_err(fifo->dt_device,
				"unknown interrupt(s) 0x%x\n",
				pending_interrupts);

			iowrite32(XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		}
	} while (pending_interrupts);

	return IRQ_HANDLED;
}

static int axis_fifo_open(struct inode *inod, struct file *f)
{
	struct axis_fifo *fifo = container_of(f->private_data,
					      struct axis_fifo, miscdev);
	f->private_data = fifo;

	if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_tx_fifo) {
			fifo->write_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
			return -EPERM;
		}
	}

	if (((f->f_flags & O_ACCMODE) == O_RDONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_rx_fifo) {
			fifo->read_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
			return -EPERM;
		}
	}

	return 0;
}

static int axis_fifo_close(struct inode *inod, struct file *f)
{
	f->private_data = NULL;

	return 0;
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = axis_fifo_open,
	.release = axis_fifo_close,
	.read = axis_fifo_read,
	.write = axis_fifo_write
};

/* read named property from the device tree */
static int get_dts_property(struct axis_fifo *fifo,
			    char *name, unsigned int *var)
{
	int rc;

	rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't read IP dts property '%s'",
			name);
		return rc;
	}
	dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
		name, *var);

	return 0;
}

static int axis_fifo_parse_dt(struct axis_fifo *fifo)
{
	int ret;
	unsigned int value;

	ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
			       &fifo->rx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
			       &fifo->tx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
		ret = -EIO;
		goto end;
	}

end:
	return ret;
}
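
/*
 * For reference, a device tree node matching what axis_fifo_parse_dt() and
 * the of_match table below expect looks roughly like this. Addresses,
 * interrupt specifiers and FIFO depths are illustrative and board-specific:
 *
 *	axi_fifo_mm_s_0: axi-fifo@43c00000 {
 *		compatible = "xlnx,axi-fifo-mm-s-4.1";
 *		reg = <0x43c00000 0x10000>;
 *		interrupt-parent = <&intc>;
 *		interrupts = <0 29 4>;
 *		xlnx,axi-str-rxd-tdata-width = <32>;
 *		xlnx,axi-str-txd-tdata-width = <32>;
 *		xlnx,rx-fifo-depth = <1024>;
 *		xlnx,tx-fifo-depth = <1024>;
 *		xlnx,use-rx-data = <1>;
 *		xlnx,use-tx-data = <1>;
 *	};
 */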

static int axis_fifo_probe(struct platform_device *pdev)
{
	struct resource *r_mem; /* IO mem resources */
	struct device *dev = &pdev->dev; /* OS device (from device tree) */
	struct axis_fifo *fifo = NULL;
	char *device_name;
	int rc = 0; /* error return value */

	/* ----------------------------
	 * init wrapper device
	 * ----------------------------
	 */

	device_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!device_name)
		return -ENOMEM;

	/* allocate device wrapper memory */
	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	dev_set_drvdata(dev, fifo);
	fifo->dt_device = dev;

	init_waitqueue_head(&fifo->read_queue);
	init_waitqueue_head(&fifo->write_queue);

	mutex_init(&fifo->read_lock);
	mutex_init(&fifo->write_lock);

	/* ----------------------------
	 * init device memory space
	 * ----------------------------
	 */

	/* get iospace for the device and request physical memory */
	fifo->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
	if (IS_ERR(fifo->base_addr)) {
		rc = PTR_ERR(fifo->base_addr);
		goto err_initial;
	}

	dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);

	/* create unique device name */
	snprintf(device_name, 32, "%s_%pa", DRIVER_NAME, &r_mem->start);
	dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);

	/* ----------------------------
	 * init IP
	 * ----------------------------
	 */

	rc = axis_fifo_parse_dt(fifo);
	if (rc)
		goto err_initial;

	reset_ip_core(fifo);

	/* ----------------------------
	 * init device interrupts
	 * ----------------------------
	 */

	/* get IRQ resource */
	rc = platform_get_irq(pdev, 0);
	if (rc < 0)
		goto err_initial;

	/* request IRQ */
	fifo->irq = rc;
	rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
			      DRIVER_NAME, fifo);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
			fifo->irq);
		goto err_initial;
	}

	/* ----------------------------
	 * init char device
	 * ----------------------------
	 */

	/* create character device */
	fifo->miscdev.fops = &fops;
	fifo->miscdev.minor = MISC_DYNAMIC_MINOR;
	fifo->miscdev.name = device_name;
	fifo->miscdev.groups = axis_fifo_attrs_groups;
	fifo->miscdev.parent = dev;
	rc = misc_register(&fifo->miscdev);
	if (rc < 0)
		goto err_initial;

	return 0;

err_initial:
	dev_set_drvdata(dev, NULL);
	return rc;
}

static void axis_fifo_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct axis_fifo *fifo = dev_get_drvdata(dev);

	misc_deregister(&fifo->miscdev);
	dev_set_drvdata(dev, NULL);
}

static const struct of_device_id axis_fifo_of_match[] = {
	{ .compatible = "xlnx,axi-fifo-mm-s-4.1", },
	{},
};
MODULE_DEVICE_TABLE(of, axis_fifo_of_match);

static struct platform_driver axis_fifo_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = axis_fifo_of_match,
	},
	.probe = axis_fifo_probe,
	.remove = axis_fifo_remove,
};

static int __init axis_fifo_init(void)
{
	if (read_timeout >= 0)
		read_timeout = msecs_to_jiffies(read_timeout);
	else
		read_timeout = MAX_SCHEDULE_TIMEOUT;

	if (write_timeout >= 0)
		write_timeout = msecs_to_jiffies(write_timeout);
	else
		write_timeout = MAX_SCHEDULE_TIMEOUT;

	pr_info("axis-fifo driver loaded with parameters read_timeout = %li, write_timeout = %li\n",
		read_timeout, write_timeout);
	return platform_driver_register(&axis_fifo_driver);
}

module_init(axis_fifo_init);

static void __exit axis_fifo_exit(void)
{
	platform_driver_unregister(&axis_fifo_driver);
}

module_exit(axis_fifo_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>");
MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver");
