// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Conexant CX23885 PCIe bridge
 *
 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 */

#include "cx23885.h"

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>

#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"

MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

/*
 * Some platforms have been found to require periodic resetting of the DMA
 * engine. Ryzen and XEON platforms are known to be affected. The symptom
 * encountered is "mpeg risc op code error". When the option equals 1 (the
 * default), only Ryzen platforms employ this workaround. The workaround can
 * be explicitly disabled for all platforms by setting the option to 0, or
 * forced on for any platform by setting it to 2.
 */
static unsigned int dma_reset_workaround = 1;
module_param(dma_reset_workaround, int, 0644);
MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
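
/*
 * For example, the workaround can be forced on at module load time with
 * "modprobe cx23885 dma_reset_workaround=2", or (since the parameter is
 * registered with mode 0644 above) toggled at runtime via
 * /sys/module/cx23885/parameters/dma_reset_workaround.
 */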

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)
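
/*
 * Debug level convention in this file: dprintk(1, ...) is used for one-off
 * setup and error-path messages, while dprintk(7, ...) marks high-rate
 * per-interrupt traces. Enable with e.g. "modprobe cx23885 debug=7".
 */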

static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)

/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 * CX23887 Assumptions
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */

static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x10380,
		.cdt = 0x104c0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name = "ch2",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10400,
		.cdt = 0x10580,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10440,
		.cdt = 0x105e0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name = "TV Audio",
		.cmds_start = 0x10190,
		.ctrl_start = 0x10480,
		.cdt = 0x10a00,
		.fifo_start = 0x7000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};

static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x105b0,
		.cdt = 0x107b0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name = "VID A (VBI)",
		.cmds_start = 0x10050,
		.ctrl_start = 0x105F0,
		.cdt = 0x10810,
		.fifo_start = 0x3000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10630,
		.cdt = 0x10870,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10670,
		.cdt = 0x108d0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name = "TV Audio",
		.cmds_start = 0x10190,
		.ctrl_start = 0x106B0,
		.cdt = 0x10930,
		.fifo_start = 0x7000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};

static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}

static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC >> 28] = "sync",
		[RISC_WRITE >> 28] = "write",
		[RISC_WRITEC >> 28] = "writec",
		[RISC_READ >> 28] = "read",
		[RISC_READC >> 28] = "readc",
		[RISC_JUMP >> 28] = "jump",
		[RISC_SKIP >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	static int incr[16] = {
		[RISC_WRITE >> 28] = 3,
		[RISC_JUMP >> 28] = 3,
		[RISC_SKIP >> 28] = 1,
		[RISC_SYNC >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	static char *bits[] = {
		"12", "13", "14", "resync",
		"cnt0", "cnt1", "18", "19",
		"20", "21", "22", "23",
		"irq1", "irq2", "eol", "sol",
	};
	int i;

	printk(KERN_DEBUG "0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			pr_cont(" %s", bits[i]);
	pr_cont(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}

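/*
 * Complete finished buffers at the head of the active queue. @count is the
 * port's hardware general-purpose counter value; buffers are handed back to
 * vb2 until the 16-bit (modulo 65536) software count catches up with it,
 * servicing at most five buffers per call.
 */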
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;
	int count_delta;
	int max_buf_done = 5; /* service maximum five buffers */

	do {
		if (list_empty(&q->active))
			return;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, queue);

		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.sequence = q->count++;
		if (count != (q->count % 65536)) {
			dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		} else {
			dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		}
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		max_buf_done--;
		/* count register is 16 bits so apply modulo appropriately */
		count_delta = ((int)count - (int)(q->count % 65536));
	} while ((count_delta > 0) && (max_buf_done > 0));
}

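/*
 * Program one SRAM channel: build the cluster descriptor table for up to
 * six FIFO lines of @bpl bytes (rounded up to 8-byte alignment), write the
 * 80-byte CMDS block (initial RISC address @risc, CDT base/size, IQ
 * base/size) and load the DMA pointer/count registers. A channel with
 * cmds_start == 0 is erased instead of configured.
 */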
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}

void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	pr_warn("%s: %s - dma channel status dump\n",
		dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		pr_warn("%s: cmds: %-15s: 0x%08x\n",
			dev->name, name[i],
			cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		pr_warn("%s: risc%d:", dev->name, i);
		cx23885_risc_decode(risc);
	}
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		pr_warn("%s: (0x%08x) iq %x:", dev->name,
			ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
				dev->name, i+j, risc, j);
		}
	}

	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	pr_warn("%s: ptr1_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr1_reg));
	pr_warn("%s: ptr2_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr2_reg));
	pr_warn("%s: cnt1_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt1_reg));
	pr_warn("%s: cnt2_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt2_reg));
}

static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
		dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		pr_info("%s: %04d:", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
				dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}

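/*
 * Part of the periodic RISC DMA engine reset workaround described at the
 * top of this file. If a transaction still appears to be in flight, the
 * TC_REQ/TC_REQ_SET status is written back and the video/VBI DMA registers
 * are drained. dev->need_dma_reset is assumed to be set elsewhere in the
 * driver, based on the dma_reset_workaround module parameter and platform
 * detection.
 */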
static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
	uint32_t reg1_val, reg2_val;

	if (!dev->need_dma_reset)
		return;

	reg1_val = cx_read(TC_REQ); /* read-only */
	reg2_val = cx_read(TC_REQ_SET);

	if (reg1_val && reg2_val) {
		cx_write(TC_REQ, reg1_val);
		cx_write(TC_REQ_SET, reg2_val);
		cx_read(VID_B_DMA);
		cx_read(VBI_B_DMA);
		cx_read(VID_C_DMA);
		cx_read(VBI_C_DMA);

		dev_info(&dev->pci->dev,
			 "dma in progress detected 0x%08x 0x%08x, clearing\n",
			 reg1_val, reg2_val);
	}
}

static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}

static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	msleep(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
				   720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
				   188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
				   188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
}


static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	return 0;
}

static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci, 0),
			       pci_resource_len(dev->pci, 0),
			       dev->name))
		return 0;

	pr_err("%s: can't get MMIO memory @ 0x%llx\n",
	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));

	return -EBUSY;
}

static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val = 0x0;
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded to allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt = VID_B_GPCNT;
		port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
		port->reg_dma_ctl = VID_B_DMA_CTL;
		port->reg_lngth = VID_B_LNGTH;
		port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_B_GEN_CTL;
		port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
		port->reg_sop_status = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_B_VLD_MISC;
		port->reg_ts_clk_en = VID_B_TS_CLK_EN;
		port->reg_src_sel = VID_B_SRC_SEL;
		port->reg_ts_int_msk = VID_B_INT_MSK;
		port->reg_ts_int_stat = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt = VID_C_GPCNT;
		port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
		port->reg_dma_ctl = VID_C_DMA_CTL;
		port->reg_lngth = VID_C_LNGTH;
		port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_C_GEN_CTL;
		port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
		port->reg_sop_status = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_C_VLD_MISC;
		port->reg_ts_clk_en = VID_C_TS_CLK_EN;
		port->reg_src_sel = 0;
		port->reg_ts_int_msk = VID_C_INT_MSK;
		port->reg_ts_int_stat = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}

static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		pr_err("%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		pr_info("%s() Hardware revision = 0x%02x\n",
			__func__, dev->hwrevision);
	else
		pr_err("%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}

/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}

static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume an 888 default */
		dev->bridge = CX23885_BRIDGE_888;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 50000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	if (dev->pci->device == 0x8852) {
		/* no DIF on cx23885, so no analog tuner support possible */
		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
	    dev->pci->subsystem_device == 0x7137) {
		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
		 * with an 888, and a 25MHz crystal, instead of the
		 * usual third overtone 50MHz. The default clock rate must
		 * be overridden so the cx25840 is properly configured
		 */
		dev->clk_freq = 25000000;
	}

	dev->pci_bus = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, tuner, standby);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * these GPIOs are pulled high. Make sure these GPIOs are
		 * marked as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}

static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}

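/*
 * Emit the RISC write instructions for one field. Walks @sglist starting at
 * @offset and generates a RISC_WRITE (3 dwords) per scatter-gather chunk
 * touched by each of @lines lines of @bpl bytes, skipping @padding bytes
 * between lines. @sync_line selects the resync point (NO_SYNC_LINE omits
 * the sync instruction), a non-zero @lpi raises IRQ1 and bumps the counter
 * every @lpi lines, and @jump prepends the 3-dword no-op JUMP used for
 * buffer chaining. Returns the next free instruction slot.
 */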
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;

	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}

int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
				       GFP_KERNEL);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0,
					UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct cx23885_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Here
	   there is no padding and no sync. First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	risc->size = instructions * 12;
	risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
				       GFP_KERNEL);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			   struct scatterlist *sglist, unsigned int top_offset,
			   unsigned int bottom_offset, unsigned int bpl,
			   unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
				       GFP_KERNEL);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0,
					UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	if (risc->cpu)
		dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
				  risc->dma);
	memset(risc, 0, sizeof(*risc));
}

static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
	dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
	dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__,
		cx_read(PCI_INT_STAT));
	dprintk(1, "%s() VID_B_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_MSTAT));
	dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_SSTAT));
	dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_MSTAT));
	dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_SSTAT));
}

int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
					  &dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
		       __func__,
		       cx23885_boards[dev->board].portb,
		       cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1; /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* Sets MOE_CLK_DIS to disable MoE clock */
		/* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);

		/* ALT_GPIO_ALT_SET: GPIO[0]
		 * IR_ALT_TX_SEL: GPIO[1]
		 * GPIO1_ALT_SEL: VIP_656_DATA[0]
		 * GPIO0_ALT_SEL: VIP_656_CLK
		 */
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	return 0;
}

static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;
	int delay = 0;
	uint32_t reg1_val;
	uint32_t reg2_val;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
	/* just in case wait for any dma to complete before allowing dealloc */
	mdelay(20);
	for (delay = 0; delay < 100; delay++) {
		reg1_val = cx_read(TC_REQ);
		reg2_val = cx_read(TC_REQ_SET);
		if (reg1_val == 0 || reg2_val == 0)
			break;
		mdelay(1);
	}
	dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
		delay, reg1_val, reg2_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}

/* ------------------------------------------------------------------ */

int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

/* ----------------------------------------------------------- */

static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.vb2_buf.index, reason,
			(unsigned long)buf->risc.dma);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}

void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}

int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
	    (status & VID_B_MSK_OPC_ERR) ||
	    (status & VID_B_MSK_VBI_OPC_ERR) ||
	    (status & VID_B_MSK_SYNC) ||
	    (status & VID_B_MSK_VBI_SYNC) ||
	    (status & VID_B_MSK_OF) ||
	    (status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
					  &dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
	    (status & VID_BC_MSK_BAD_PKT) ||
	    (status & VID_BC_MSK_SYNC) ||
	    (status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
					  &dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

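/*
 * Top-level PCI interrupt handler: snapshot the per-block status/mask
 * registers, dispatch to the TS, video, audio, CI and IR sub-handlers,
 * and, if anything was handled, acknowledge the masked PCI status bits.
 */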
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	if ((pci_status & pci_mask) == 0) {
		dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
			pci_status, pci_mask);
		goto out;
	}

	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	if (((pci_status & pci_mask) == 0) &&
	    ((ts2_status & ts2_mask) == 0) &&
	    ((ts1_status & ts1_mask) == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

	if (cx23885_boards[dev->board].ci_type == 1 &&
	    (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
	    (pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status & pci_mask);
out:
	return IRQ_RETVAL(handled);
}

static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
				    unsigned int notification, void *arg)
{
	struct cx23885_dev *dev;

	if (sd == NULL)
		return;

	dev = to_cx23885(sd->v4l2_dev);

	switch (notification) {
	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	}
}

static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}

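/*
 * Illustrative note (not from the original source): subdevices reach the
 * notify hook installed above through the v4l2_subdev_notify() helper,
 * which forwards to dev->v4l2_dev.notify. An IR subdevice, for example,
 * might report received-data events roughly like this:
 *
 *	u32 events = V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED;
 *
 *	v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
 */
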
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}

static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}

/* The mask represents 32 different GPIOs; GPIOs are split across multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of its
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 through  0 - On the cx23885 bridge
 * GPIO 18 through  3 - On the cx23417 host bus interface
 * GPIO 23 through 19 - On the cx25840 a/v core
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Setting GPIO on encoder ports\n",
			       dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Clearing GPIO on encoder ports\n",
			       dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Reading GPIO on encoder ports\n",
			       dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);

	return 0;
}

void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Enabling GPIO on encoder ports\n",
			       dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}

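/*
 * Illustrative sketch (not part of the original driver): a board setup
 * path could use the helpers above to pulse bridge GPIO 0, e.g. to reset
 * an attached part. The GPIO choice and the 100ms timings here are
 * assumptions for illustration only.
 */
static void __maybe_unused example_gpio_reset_pulse(struct cx23885_dev *dev)
{
	cx23885_gpio_enable(dev, GPIO_0, 1);	/* configure GPIO 0 as an output */
	cx23885_gpio_clear(dev, GPIO_0);	/* drive it low */
	msleep(100);
	cx23885_gpio_set(dev, GPIO_0);		/* release it high */
	msleep(100);
}
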
static struct {
	int vendor, dev;
} const broken_dev_id[] = {
	/* According to
	 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
	 * 0x1451 is the PCI ID for the IOMMU found on Ryzen
	 */
	{ PCI_VENDOR_ID_AMD, 0x1451 },
	/* According to sudo lspci -nn,
	 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
	 */
	{ PCI_VENDOR_ID_AMD, 0x1423 },
	/* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
	 */
	{ PCI_VENDOR_ID_AMD, 0x1481 },
	/* 0x1419 is the PCI ID for the IOMMU found on family 15h (models 10h-1fh)
	 */
	{ PCI_VENDOR_ID_AMD, 0x1419 },
	/* 0x1631 is the PCI ID for the IOMMU found on Renoir/Cezanne
	 */
	{ PCI_VENDOR_ID_AMD, 0x1631 },
	/* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
	 */
	{ PCI_VENDOR_ID_ATI, 0x5a23 },
};

static bool cx23885_does_need_dma_reset(void)
{
	int i;
	struct pci_dev *pdev = NULL;

	if (dma_reset_workaround == 0)
		return false;
	else if (dma_reset_workaround == 2)
		return true;

	for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
		pdev = pci_get_device(broken_dev_id[i].vendor,
				      broken_dev_id[i].dev, NULL);
		if (pdev) {
			pci_dev_put(pdev);
			return true;
		}
	}
	return false;
}

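/*
 * Usage note (illustrative, not from the original source): the automatic
 * detection above can be overridden from userspace via the module
 * parameter, e.g. to force the workaround on regardless of platform:
 *
 *	options cx23885 dma_reset_workaround=2
 *
 * placed in a modprobe configuration file such as
 * /etc/modprobe.d/cx23885.conf.
 */
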
static int cx23885_initdev(struct pci_dev *pci_dev,
			   const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	dev->need_dma_reset = cx23885_does_need_dma_reset();

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
		dev->name,
		pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
		dev->pci_lat,
		(unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	err = dma_set_mask(&pci_dev->dev, 0xffffffff);
	if (err) {
		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		goto fail_dma_set_mask;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		pr_err("%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_dma_set_mask;
	}

	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_dma_set_mask:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}

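/*
 * Device teardown: mirrors cx23885_initdev() in reverse order. Stop
 * input/IR handling, quiesce the hardware, release the IRQ, then disable
 * the PCI device and free the remaining resources.
 */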
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}

static const struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor = 0x14f1,
		.device = 0x8852,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor = 0x14f1,
		.device = 0x8880,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);

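/*
 * The device table exported above generates modalias entries, so udev can
 * autoload this driver when a matching 14f1:8852 or 14f1:8880 device is
 * discovered on the bus.
 */
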
static struct pci_driver cx23885_pci_driver = {
	.name = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe = cx23885_initdev,
	.remove = cx23885_finidev,
};

static int __init cx23885_init(void)
{
	pr_info("cx23885 driver version %s loaded\n",
		CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}

static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);