// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
 *
 * Based on original driver by Krzysztof Hałasa:
 * Copyright (C) 2015 Industrial Research Institute for Automation
 * and Measurements PIAP
 *
 * Notes
 * -----
 *
 * 1. Under stress-testing, it has been observed that the PCIe link
 * goes down, without reason. Therefore, the driver takes special care
 * to allow device hot-unplugging.
 *
 * 2. TW686X devices are capable of setting a few different DMA modes,
 * including: scatter-gather, field and frame modes. However,
 * under stress testing it has been found that the machine can
 * freeze completely if DMA registers are programmed while streaming
 * is active.
 *
 * Therefore, the driver implements a dma_mode called 'memcpy' which
 * avoids cycling the DMA buffers, and instead allocates extra DMA buffers
 * and then copies into vmalloc'ed user buffers.
 *
 * In addition to this, when streaming is on, the driver tries to access
 * hardware registers as infrequently as possible. This is done by using
 * a timer to limit the rate at which DMA is reset on DMA channel errors.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "tw686x.h"
#include "tw686x-regs.h"

/*
 * This module parameter allows control of the DMA_TIMER_INTERVAL value.
 * The DMA_TIMER_INTERVAL register controls the minimum DMA interrupt
 * time span (in other words, the maximum DMA interrupt rate), thus
 * allowing for IRQ coalescing.
 *
 * The chip datasheet does not mention a time unit for this value, so
 * users wanting fine-grained control over the interrupt rate should
 * determine the desired value through testing.
 */
static u32 dma_interval = 0x00098968;
module_param(dma_interval, int, 0444);
MODULE_PARM_DESC(dma_interval, "Minimum time span for DMA interrupting host");
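
/*
 * Example (hypothetical value): since the register sets the minimum time
 * span between DMA interrupts, a smaller value should allow a higher
 * interrupt rate. The parameter is read-only via sysfs (0444), so it has
 * to be given at module load time, e.g.:
 *
 *   modprobe tw686x dma_interval=0x00010000
 */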

static unsigned int dma_mode = TW686X_DMA_MODE_MEMCPY;
static const char *dma_mode_name(unsigned int mode)
{
	switch (mode) {
	case TW686X_DMA_MODE_MEMCPY:
		return "memcpy";
	case TW686X_DMA_MODE_CONTIG:
		return "contig";
	case TW686X_DMA_MODE_SG:
		return "sg";
	default:
		return "unknown";
	}
}

static int tw686x_dma_mode_get(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s", dma_mode_name(dma_mode));
}

static int tw686x_dma_mode_set(const char *val, const struct kernel_param *kp)
{
	if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY)))
		dma_mode = TW686X_DMA_MODE_MEMCPY;
	else if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_CONTIG)))
		dma_mode = TW686X_DMA_MODE_CONTIG;
	else if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_SG)))
		dma_mode = TW686X_DMA_MODE_SG;
	else
		return -EINVAL;
	return 0;
}
module_param_call(dma_mode, tw686x_dma_mode_set, tw686x_dma_mode_get,
		  &dma_mode, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dma_mode, "DMA operation mode (memcpy/contig/sg, default=memcpy)");
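
/*
 * Example usage (illustrative; the sysfs path follows the standard module
 * parameter layout). A runtime change only affects devices probed
 * afterwards, since the mode is latched into dev->dma_mode at probe time:
 *
 *   modprobe tw686x dma_mode=sg
 *   echo contig > /sys/module/tw686x/parameters/dma_mode
 */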

void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel)
{
	u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	u32 dma_cmd = reg_read(dev, DMA_CMD);

	dma_en &= ~BIT(channel);
	dma_cmd &= ~BIT(channel);

	/* Must remove it from pending too */
	dev->pending_dma_en &= ~BIT(channel);
	dev->pending_dma_cmd &= ~BIT(channel);

	/* Stop DMA if no channels are enabled */
	if (!dma_en)
		dma_cmd = 0;
	reg_write(dev, DMA_CHANNEL_ENABLE, dma_en);
	reg_write(dev, DMA_CMD, dma_cmd);
}

void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
{
	u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	u32 dma_cmd = reg_read(dev, DMA_CMD);

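	/*
	 * Only record the desired state here; the enable bits are written
	 * to the hardware later, when the dma_delay timer fires (see the
	 * comment before tw686x_dma_delay() below), so that DMA is never
	 * (re)programmed "too fast".
	 */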
	dev->pending_dma_en |= dma_en | BIT(channel);
	dev->pending_dma_cmd |= dma_cmd | DMA_CMD_ENABLE | BIT(channel);
}

/*
 * The purpose of this awful hack is to avoid enabling the DMA
 * channels "too fast" which makes some TW686x devices very
 * angry and freeze the CPU (see note 1).
 */
static void tw686x_dma_delay(struct timer_list *t)
{
	struct tw686x_dev *dev = from_timer(dev, t, dma_delay_timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	reg_write(dev, DMA_CHANNEL_ENABLE, dev->pending_dma_en);
	reg_write(dev, DMA_CMD, dev->pending_dma_cmd);
	dev->pending_dma_en = 0;
	dev->pending_dma_cmd = 0;

	spin_unlock_irqrestore(&dev->lock, flags);
}

static void tw686x_reset_channels(struct tw686x_dev *dev, unsigned int ch_mask)
{
	u32 dma_en, dma_cmd;

	dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	dma_cmd = reg_read(dev, DMA_CMD);

	/*
	 * Save the pending register values; the timer will
	 * restore them.
	 */
	dev->pending_dma_en |= dma_en;
	dev->pending_dma_cmd |= dma_cmd;

	/* Disable the channels being reset */
	reg_write(dev, DMA_CHANNEL_ENABLE, dma_en & ~ch_mask);

	if ((dma_en & ~ch_mask) == 0) {
		dev_dbg(&dev->pci_dev->dev, "reset: stopping DMA\n");
		dma_cmd &= ~DMA_CMD_ENABLE;
	}
	reg_write(dev, DMA_CMD, dma_cmd & ~ch_mask);
}

static irqreturn_t tw686x_irq(int irq, void *dev_id)
{
	struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
	unsigned int video_requests, audio_requests, reset_ch;
	u32 fifo_status, fifo_signal, fifo_ov, fifo_bad, fifo_errors;
	u32 int_status, dma_en, video_en, pb_status;
	unsigned long flags;

	int_status = reg_read(dev, INT_STATUS); /* cleared on read */
	fifo_status = reg_read(dev, VIDEO_FIFO_STATUS);

	/* INT_STATUS does not include FIFO_STATUS errors! */
	if (!int_status && !TW686X_FIFO_ERROR(fifo_status))
		return IRQ_NONE;

	if (int_status & INT_STATUS_DMA_TOUT) {
		dev_dbg(&dev->pci_dev->dev,
			"DMA timeout. Resetting DMA for all channels\n");
		reset_ch = ~0;
		goto reset_channels;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	spin_unlock_irqrestore(&dev->lock, flags);

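	/*
	 * Per the decoding below: the low byte of DMA_CHANNEL_ENABLE
	 * covers the video channels and the next byte the audio channels,
	 * while VIDEO_FIFO_STATUS packs per-video-channel flags:
	 * bits 0-7 loss of signal, bits 16-23 bad FIFO data,
	 * bits 24-31 FIFO overflow.
	 */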
	video_en = dma_en & 0xff;
	fifo_signal = ~(fifo_status & 0xff) & video_en;
	fifo_ov = fifo_status >> 24;
	fifo_bad = fifo_status >> 16;

	/* Mask of channels with signal and FIFO errors */
	fifo_errors = fifo_signal & (fifo_ov | fifo_bad);

	reset_ch = 0;
	pb_status = reg_read(dev, PB_STATUS);

	/* Coalesce video frame/error events */
	video_requests = (int_status & video_en) | fifo_errors;
	audio_requests = (int_status & dma_en) >> 8;

	if (video_requests)
		tw686x_video_irq(dev, video_requests, pb_status,
				 fifo_status, &reset_ch);
	if (audio_requests)
		tw686x_audio_irq(dev, audio_requests, pb_status);

reset_channels:
	if (reset_ch) {
		spin_lock_irqsave(&dev->lock, flags);
		tw686x_reset_channels(dev, reset_ch);
		spin_unlock_irqrestore(&dev->lock, flags);
		mod_timer(&dev->dma_delay_timer,
			  jiffies + msecs_to_jiffies(100));
	}

	return IRQ_HANDLED;
}

static void tw686x_dev_release(struct v4l2_device *v4l2_dev)
{
	struct tw686x_dev *dev = container_of(v4l2_dev, struct tw686x_dev,
					      v4l2_dev);
	unsigned int ch;

	for (ch = 0; ch < max_channels(dev); ch++)
		v4l2_ctrl_handler_free(&dev->video_channels[ch].ctrl_handler);

	v4l2_device_unregister(&dev->v4l2_dev);

	kfree(dev->audio_channels);
	kfree(dev->video_channels);
	kfree(dev);
}

static int tw686x_probe(struct pci_dev *pci_dev,
			const struct pci_device_id *pci_id)
{
	struct tw686x_dev *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->type = pci_id->driver_data;
	dev->dma_mode = dma_mode;
	sprintf(dev->name, "tw%04X", pci_dev->device);

	dev->video_channels = kcalloc(max_channels(dev),
				      sizeof(*dev->video_channels), GFP_KERNEL);
	if (!dev->video_channels) {
		err = -ENOMEM;
		goto free_dev;
	}

	dev->audio_channels = kcalloc(max_channels(dev),
				      sizeof(*dev->audio_channels), GFP_KERNEL);
	if (!dev->audio_channels) {
		err = -ENOMEM;
		goto free_video;
	}

	pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx (%s mode)\n", dev->name,
		pci_name(pci_dev), pci_dev->irq,
		(unsigned long)pci_resource_start(pci_dev, 0),
		dma_mode_name(dma_mode));

	dev->pci_dev = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto free_audio;
	}

	pci_set_master(pci_dev);
	err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pci_dev->dev, "32-bit PCI DMA not supported\n");
		err = -EIO;
		goto disable_pci;
	}

	err = pci_request_regions(pci_dev, dev->name);
	if (err) {
		dev_err(&pci_dev->dev, "unable to request PCI region\n");
		goto disable_pci;
	}

	dev->mmio = pci_ioremap_bar(pci_dev, 0);
	if (!dev->mmio) {
		dev_err(&pci_dev->dev, "unable to remap PCI region\n");
		err = -ENOMEM;
		goto free_region;
	}

	/* Reset all subsystems */
	reg_write(dev, SYS_SOFT_RST, 0x0f);
	mdelay(1);

	reg_write(dev, SRST[0], 0x3f);
	if (max_channels(dev) > 4)
		reg_write(dev, SRST[1], 0x3f);

	/* Disable the DMA engine */
	reg_write(dev, DMA_CMD, 0);
	reg_write(dev, DMA_CHANNEL_ENABLE, 0);

	/* Enable DMA FIFO overflow and pointer check */
	reg_write(dev, DMA_CONFIG, 0xffffff04);
	reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x140c8584);
	reg_write(dev, DMA_TIMER_INTERVAL, dma_interval);

	spin_lock_init(&dev->lock);

	timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);

	/*
	 * This must be set right before initializing v4l2_dev.
	 * It's used to release resources after the last handle
	 * held is released.
	 */
	dev->v4l2_dev.release = tw686x_dev_release;
	err = tw686x_video_init(dev);
	if (err) {
		dev_err(&pci_dev->dev, "can't register video\n");
		goto iounmap;
	}

	err = tw686x_audio_init(dev);
	if (err)
		dev_warn(&pci_dev->dev, "can't register audio\n");

	err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
			  dev->name, dev);
	if (err < 0) {
		dev_err(&pci_dev->dev, "unable to request interrupt\n");
		goto tw686x_free;
	}

	pci_set_drvdata(pci_dev, dev);
	return 0;

tw686x_free:
	tw686x_video_free(dev);
	tw686x_audio_free(dev);
iounmap:
	pci_iounmap(pci_dev, dev->mmio);
free_region:
	pci_release_regions(pci_dev);
disable_pci:
	pci_disable_device(pci_dev);
free_audio:
	kfree(dev->audio_channels);
free_video:
	kfree(dev->video_channels);
free_dev:
	kfree(dev);
	return err;
}

static void tw686x_remove(struct pci_dev *pci_dev)
{
	struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
	unsigned long flags;

	/* This guarantees the IRQ handler is no longer running,
	 * which means we can kiss good-bye some resources.
	 */
	free_irq(pci_dev->irq, dev);

	tw686x_video_free(dev);
	tw686x_audio_free(dev);
	del_timer_sync(&dev->dma_delay_timer);

	pci_iounmap(pci_dev, dev->mmio);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);

	/*
	 * Setting pci_dev to NULL makes it possible to detect that the
	 * hardware is no longer available; this is used by the vb2_ops.
	 * It is required because the device sometimes hot-unplugs itself
	 * as the result of a PCIe link down.
	 * The lock is really important here.
	 */
	spin_lock_irqsave(&dev->lock, flags);
	dev->pci_dev = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	/*
	 * This calls tw686x_dev_release if it's the last reference.
	 * Otherwise, release is postponed until there are no users left.
	 */
	v4l2_device_put(&dev->v4l2_dev);
}

/*
 * On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
 * with 10-bit start_idx and end_idx determining the start and end of the
 * frame buffer for a particular channel.
 * TW6868 with all its 8 channels would be problematic (only 127 SG entries
 * per channel), but we support only 4 channels on this chip anyway (the
 * first 4 channels are driven by the internal video decoder; the other 4
 * would require an external TW286x part).
 *
 * On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
 * starting at 0. Both chips have complete sets of internal video decoders
 * (4-channel and 8-channel, respectively).
 *
 * All chips have separate SG tables for two video frames.
 */

/* driver_data is number of A/V channels */
static const struct pci_device_id tw686x_pci_tbl[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6864),
		.driver_data = 4
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6865), /* not tested */
		.driver_data = 4 | TYPE_SECOND_GEN
	},
	/*
	 * TW6868 supports 8 A/V channels with an external TW2865 chip;
	 * not supported by the driver.
	 */
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6868), /* not tested */
		.driver_data = 4
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6869),
		.driver_data = 8 | TYPE_SECOND_GEN
	},
	{}
};
MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);

static struct pci_driver tw686x_pci_driver = {
	.name = "tw686x",
	.id_table = tw686x_pci_tbl,
	.probe = tw686x_probe,
	.remove = tw686x_remove,
};
module_pci_driver(tw686x_pci_driver);

MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>");
MODULE_AUTHOR("Krzysztof Hałasa <khalasa@piap.pl>");
MODULE_LICENSE("GPL v2");
