// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * netup_unidvb_core.c
 *
 * Main module for NetUP Universal Dual DVB-CI
 *
 * Copyright (C) 2014 NetUP Inc.
 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "helene.h"
#include "lnbh25.h"
static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Avalon-MM PCI-E registers */
#define AVL_PCIE_IENR 0x50
#define AVL_PCIE_ISR 0x40
#define AVL_IRQ_ENABLE 0x80
#define AVL_IRQ_ASSERTED 0x80
/* GPIO registers */
#define GPIO_REG_IO 0x4880
#define GPIO_REG_IO_TOGGLE 0x4882
#define GPIO_REG_IO_SET 0x4884
#define GPIO_REG_IO_CLEAR 0x4886
/* GPIO bits */
#define GPIO_FEA_RESET (1 << 0)
#define GPIO_FEB_RESET (1 << 1)
#define GPIO_RFA_CTL (1 << 2)
#define GPIO_RFB_CTL (1 << 3)
#define GPIO_FEA_TU_RESET (1 << 4)
#define GPIO_FEB_TU_RESET (1 << 5)
/* DMA base address */
#define NETUP_DMA0_ADDR 0x4900
#define NETUP_DMA1_ADDR 0x4940
/* 8 DMA blocks * 128 packets * 188 bytes */
#define NETUP_DMA_BLOCKS_COUNT 8
#define NETUP_DMA_PACKETS_COUNT 128
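/*
 * One DMA channel thus covers a ring of
 * 8 blocks * 128 packets * 188 bytes = 192512 bytes;
 * netup_unidvb_initdev() allocates twice that and
 * netup_unidvb_dma_init() hands half of it to each channel.
 */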
/* DMA status bits */
#define BIT_DMA_RUN 1
#define BIT_DMA_ERROR 2
#define BIT_DMA_IRQ 0x200

/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set: Control register, write to set control bits
 * @ctrlstat_clear: Control register, write to clear control bits
 * @start_addr_lo: DMA ring buffer start address, lower part
 * @start_addr_hi: DMA ring buffer start address, higher part
 * @size: DMA ring buffer size register
 *	* Bits [0-7]: DMA packet size, 188 bytes
 *	* Bits [8-15]: packets count in block, 128 packets
 *	* Bits [24-31]: blocks count, 8 blocks
 * @timeout: DMA timeout in units of 8 ns
 *	For example, a value of 375000000 corresponds to 3 s
 * @curr_addr_lo: Current ring buffer head address, lower part
 * @curr_addr_hi: Current ring buffer head address, higher part
 * @stat_pkt_received: Statistic register, not tested
 * @stat_pkt_accepted: Statistic register, not tested
 * @stat_pkt_overruns: Statistic register, not tested
 * @stat_pkt_underruns: Statistic register, not tested
 * @stat_fifo_overruns: Statistic register, not tested
 */
struct netup_dma_regs {
	__le32 ctrlstat_set;
	__le32 ctrlstat_clear;
	__le32 start_addr_lo;
	__le32 start_addr_hi;
	__le32 size;
	__le32 timeout;
	__le32 curr_addr_lo;
	__le32 curr_addr_hi;
	__le32 stat_pkt_received;
	__le32 stat_pkt_accepted;
	__le32 stat_pkt_overruns;
	__le32 stat_pkt_underruns;
	__le32 stat_fifo_overruns;
} __packed __aligned(1);

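/*
 * Driver-private vb2 buffer. @vb must stay the first field so the
 * to_vb2_v4l2_buffer()/container_of() chain used in the buffer ops
 * resolves correctly; @size counts how many of the
 * NETUP_DMA_PACKETS_COUNT * 188 payload bytes have been filled so far.
 */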
struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
	u32 size;
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8,
	.xtal = SONY_XTAL_24000,
	.flags = CXD2841ER_USE_GATECTRL | CXD2841ER_ASCOT
};

static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct helene_config helene_conf = {
	.i2c_address = 0xc0,
	.xtal = SONY_HELENE_XTAL_24000,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};

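/*
 * Tuner callback, passed via the *_config structures above. Switches the
 * RF input of the channel given by @priv (a struct netup_dma) between
 * the terrestrial/cable and satellite paths by toggling GPIO_RFA_CTL or
 * GPIO_RFB_CTL; the GPIO polarity is inverted on hw rev. 1.4.
 */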
static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
{
	u8 reg, mask;
	struct netup_dma *dma = priv;
	struct netup_unidvb_dev *ndev;

	if (!priv)
		return -EINVAL;
	ndev = dma->ndev;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
		__func__, dma->num, is_dvb_tc);
	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;

	/* inverted tuner control in hw rev. 1.4 */
	if (ndev->rev == NETUP_HW_REV_1_4)
		is_dvb_tc = !is_dvb_tc;

	if (!is_dvb_tc)
		reg |= mask;
	else
		reg &= ~mask;
	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
	return 0;
}

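/*
 * One-time card bring-up: enable PCIe interrupt delivery and release
 * the frontends and tuners from reset.
 */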
static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* cycle frontend/tuner reset GPIOs: clear, wait 100 ms, then set */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
}

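/*
 * Start or stop one DMA channel: toggle BIT_DMA_RUN in the channel's
 * control register together with its bit in the global IRQ mask.
 */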
static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
{
	u32 irq_mask = (dma->num == 0 ?
		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);

	dev_dbg(&dma->ndev->pci_dev->dev,
		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
	if (enable) {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
	} else {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
	}
}

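/*
 * Per-channel DMA interrupt. Reads the current ring head from the
 * hardware, computes how many bytes arrived since the last interrupt
 * (modulo the ring size), accounts them in data_size/data_offset under
 * dma->lock and schedules the copy worker.
 */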
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
	    addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		printk_ratelimited("%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}

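/*
 * Top-level (shared) interrupt handler. PCIe interrupt delivery is
 * masked while the handler runs; REG_ISR is then decoded in order:
 * SPI first (the only source serviced on old firmware), then I2C0/I2C1,
 * the two DMA channels and finally the CI slots.
 */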
static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_SPI)
			iret = netup_spi_interrupt(ndev->spi);
		else if (!ndev->old_fw) {
			if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
				iret = netup_i2c_interrupt(&ndev->i2c[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
				iret = netup_i2c_interrupt(&ndev->i2c[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
				iret = netup_dma_interrupt(&ndev->dma[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
				iret = netup_dma_interrupt(&ndev->dma[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
				iret = netup_ci_interrupt(ndev);
			} else {
				goto err;
			}
		} else {
err:
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}

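/*
 * vb2 queue operations. Buffers are vmalloc'ed chunks sized for
 * NETUP_DMA_PACKETS_COUNT * 188 bytes of TS data; queue_setup() tops
 * the queue up to VIDEO_MAX_FRAME buffers.
 */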
static int netup_unidvb_queue_setup(struct vb2_queue *vq,
				    unsigned int *nbuffers,
				    unsigned int *nplanes,
				    unsigned int sizes[],
				    struct device *alloc_devs[])
{
	struct netup_dma *dma = vb2_get_drv_priv(vq);
	unsigned int q_num_bufs = vb2_get_num_buffers(vq);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);

	*nplanes = 1;
	if (q_num_bufs + *nbuffers < VIDEO_MAX_FRAME)
		*nbuffers = VIDEO_MAX_FRAME - q_num_bufs;
	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
		__func__, *nbuffers, sizes[0]);
	return 0;
}

static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
	buf->size = 0;
	return 0;
}

static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
	spin_lock_irqsave(&dma->lock, flags);
	list_add_tail(&buf->list, &dma->free_buffers);
	spin_unlock_irqrestore(&dma->lock, flags);
	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
}

static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 1);
	return 0;
}

static void netup_unidvb_stop_streaming(struct vb2_queue *q)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 0);
	netup_unidvb_queue_cleanup(dma);
}

static const struct vb2_ops dvb_qops = {
	.queue_setup = netup_unidvb_queue_setup,
	.buf_prepare = netup_unidvb_buf_prepare,
	.buf_queue = netup_unidvb_buf_queue,
	.start_streaming = netup_unidvb_start_streaming,
	.stop_streaming = netup_unidvb_stop_streaming,
};

static int netup_unidvb_queue_init(struct netup_dma *dma,
				   struct vb2_queue *vb_queue)
{
	int res;

	/* Init videobuf2 queue structure */
	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	vb_queue->drv_priv = dma;
	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
	vb_queue->ops = &dvb_qops;
	vb_queue->mem_ops = &vb2_vmalloc_memops;
	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	res = vb2_queue_init(vb_queue);
	if (res != 0) {
		dev_err(&dma->ndev->pci_dev->dev,
			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
	}
	return res;
}

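/*
 * Register one DVB bus (num is 0 or 1). Each bus gets two vb2_dvb
 * frontends backed by the same CXD2841ER demodulator: the first is
 * DVB-S/S2, the second DVB-T/T2/C. The tuner (HORUS3A/ASCOT2E on
 * hw rev. 1.3, HELENE on 1.4) and the LNBH25 SEC chip sit on the same
 * I2C adapter, ndev->i2c[num].
 */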
static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
				 int num)
{
	int fe_count = 2;
	int i = 0;
	struct vb2_dvb_frontend *fes[2];
	char fe_name[32];

	if (ndev->rev == NETUP_HW_REV_1_3)
		demod_config.xtal = SONY_XTAL_20500;
	else
		demod_config.xtal = SONY_XTAL_24000;

	if (num < 0 || num > 1) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init DVB bus %d\n", __func__, num);
		return -ENODEV;
	}
	mutex_init(&ndev->frontends[num].lock);
	INIT_LIST_HEAD(&ndev->frontends[num].felist);

	for (i = 0; i < fe_count; i++) {
		if (vb2_dvb_alloc_frontend(&ndev->frontends[num], i+1)
				== NULL) {
			dev_err(&ndev->pci_dev->dev,
				"%s(): unable to allocate vb2_dvb_frontend\n",
				__func__);
			return -ENOMEM;
		}
	}

	for (i = 0; i < fe_count; i++) {
		fes[i] = vb2_dvb_get_frontend(&ndev->frontends[num], i+1);
		if (fes[i] == NULL) {
			dev_err(&ndev->pci_dev->dev,
				"%s(): frontend has not been allocated\n",
				__func__);
			return -EINVAL;
		}
	}

	for (i = 0; i < fe_count; i++) {
		netup_unidvb_queue_init(&ndev->dma[num], &fes[i]->dvb.dvbq);
		snprintf(fe_name, sizeof(fe_name), "netup_fe%d", i);
		fes[i]->dvb.name = fe_name;
	}

	fes[0]->dvb.frontend = dvb_attach(cxd2841er_attach_s,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[0]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 frontend\n",
			__func__);
		goto frontend_detach;
	}

	if (ndev->rev == NETUP_HW_REV_1_3) {
		horus3a_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(horus3a_attach, fes[0]->dvb.frontend,
				&horus3a_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): unable to attach HORUS3A DVB-S/S2 tuner frontend\n",
				__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach_s, fes[0]->dvb.frontend,
				&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
				"%s(): unable to attach HELENE DVB-S/S2 tuner frontend\n",
				__func__);
			goto frontend_detach;
		}
	}

	if (!dvb_attach(lnbh25_attach, fes[0]->dvb.frontend,
			&lnbh25_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach SEC frontend\n", __func__);
		goto frontend_detach;
	}

	/* DVB-T/T2/C frontend */
	fes[1]->dvb.frontend = dvb_attach(cxd2841er_attach_t_c,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[1]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach Ter frontend\n", __func__);
		goto frontend_detach;
	}
	fes[1]->dvb.frontend->id = 1;
	if (ndev->rev == NETUP_HW_REV_1_3) {
		ascot2e_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(ascot2e_attach, fes[1]->dvb.frontend,
				&ascot2e_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): unable to attach Ter tuner frontend\n",
				__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach, fes[1]->dvb.frontend,
				&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
				"%s(): unable to attach HELENE Ter tuner frontend\n",
				__func__);
			goto frontend_detach;
		}
	}

	if (vb2_dvb_register_bus(&ndev->frontends[num],
				 THIS_MODULE, NULL,
				 &ndev->pci_dev->dev, NULL, adapter_nr, 1)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to register DVB bus %d\n",
			__func__, num);
		goto frontend_detach;
	}
	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
	return 0;
frontend_detach:
	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
	return -EINVAL;
}

static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}

static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}

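/*
 * Copy as much ring data as fits into one vb2 buffer. At most two
 * memcpy_fromio() steps are needed: the tail of the ring up to
 * ring_buffer_size when the data wraps, then the remainder from the
 * start of the ring. Called with dma->lock held.
 */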
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}

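/*
 * Workqueue handler: drain dma->data_size bytes from the ring into the
 * queued buffers; each buffer is completed with vb2_buffer_done() once
 * it holds a full NETUP_DMA_PACKETS_COUNT * 188 bytes.
 */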
static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
{
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	while (!list_empty(&dma->free_buffers)) {
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_dma_timeout(struct timer_list *t)
{
	struct netup_dma *dma = from_timer(dma, t, timeout);
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}

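/*
 * Set up one DMA channel: it gets the lower (num == 0) or upper half of
 * the coherent buffer, then the ring geometry and base address are
 * programmed. Only bits [29:0] of the bus address go into start_addr_lo;
 * the top two bits (high_addr) are written to BAR0 offset 0x1000,
 * presumably the Avalon-MM address translation entry, and are OR'ed back
 * into the head pointer read in netup_dma_interrupt().
 */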
static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	timer_setup(&dma->timeout, netup_unidvb_dma_timeout, 0);
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
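	/*
	 * ring geometry word, per the struct netup_dma_regs layout:
	 * (8 << 24) | (128 << 8) | 188 == 0x080080bc
	 */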
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}

static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;

	if (num < 0 || num > 1)
		return;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
	dma = &ndev->dma[num];
	netup_unidvb_dma_enable(dma, 0);
	msleep(50);
	cancel_work_sync(&dma->work);
	del_timer_sync(&dma->timeout);
}

static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dma_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dma_init(ndev, 1);
	if (res) {
		netup_unidvb_dma_fini(ndev, 0);
		return res;
	}
	netup_unidvb_dma_enable(&ndev->dma[0], 0);
	netup_unidvb_dma_enable(&ndev->dma[1], 0);
	return 0;
}

static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
				 struct pci_dev *pci_dev)
{
	int res;

	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
	if (res)
		return res;
	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
	if (res)
		netup_unidvb_ci_unregister(ndev, 0);
	return res;
}

static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
{
	if (!request_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 0));
		return -EBUSY;
	}
	if (!request_mem_region(pci_resource_start(pci_dev, 1),
			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 1));
		release_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0));
		return -EBUSY;
	}
	return 0;
}

static int netup_unidvb_request_modules(struct device *dev)
{
	static const char * const modules[] = {
		"lnbh25", "ascot2e", "horus3a", "cxd2841er", "helene", NULL
	};
	const char * const *curr_mod = modules;
	int err;

	while (*curr_mod != NULL) {
		err = request_module(*curr_mod);
		if (err) {
			dev_warn(dev, "request_module(%s) failed: %d\n",
				*curr_mod, err);
		}
		++curr_mod;
	}
	return 0;
}

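/*
 * PCI probe. Loads the demod/tuner helper modules, maps both BARs,
 * allocates the shared coherent DMA buffer, then brings up SPI, I2C,
 * the two DVB buses, CI and DMA in that order. If the PCI revision
 * signals old firmware, probing stops after the SPI flash interface
 * has been set up, so the firmware can be updated.
 */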
static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* Check card revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions at http://www.netup.tv\n");
		old_firmware = 1;
		spi_enable = 1;
	}

	/* allocate device context */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;

	/* detect hardware revision */
	if (pci_dev->device == NETUP_HW_REV_1_3)
		ndev->rev = NETUP_HW_REV_1_3;
	else
		ndev->rev = NETUP_HW_REV_1_4;

	dev_info(&pci_dev->dev,
		"%s(): board (0x%x) hardware revision 0x%x\n",
		__func__, pci_dev->device, ndev->rev);

	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus * 10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);
	/* PCI init */
	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* read PCI info */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x\n",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d\n",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);

	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* enable I2C IRQs */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}

	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto dma_setup_err;
	}

	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
		ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}

static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
		ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}

static const struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */
	{ PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 1.4 */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name = "netup_unidvb",
	.id_table = netup_unidvb_pci_tbl,
	.probe = netup_unidvb_initdev,
	.remove = netup_unidvb_finidev,
};

module_pci_driver(netup_unidvb_pci_driver);