1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* interrupt handling |
3 | Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> |
4 | Copyright (C) 2004 Chris Kennedy <c@groovy.org> |
5 | Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> |
6 | |
7 | */ |
8 | |
9 | #include "ivtv-driver.h" |
10 | #include "ivtv-queue.h" |
11 | #include "ivtv-udma.h" |
12 | #include "ivtv-irq.h" |
13 | #include "ivtv-mailbox.h" |
14 | #include "ivtv-vbi.h" |
15 | #include "ivtv-yuv.h" |
16 | #include <media/v4l2-event.h> |
17 | |
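/*
 * Magic word written over the first dword of a pending transfer in card
 * memory (see stream_enc_dma_append()). When the transfer completes,
 * dma_post() scans for this cookie to find where the data really starts
 * and then restores the dword that was overwritten (pending_backup /
 * dma_backup).
 */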
18 | #define DMA_MAGIC_COOKIE 0x000001fe |
19 | |
20 | static void ivtv_dma_dec_start(struct ivtv_stream *s); |
21 | |
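/*
 * Maps the capture stream number reported by the firmware in the
 * ENC START CAP mailbox (data[0]: 0 = MPG, 1 = YUV, 2 = PCM) onto the
 * driver's stream array. The VBI entry is kept at index 3, although VBI
 * captures are delivered through ivtv_irq_enc_vbi_cap() and never looked
 * up here (data[0] > 2 is rejected).
 */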
22 | static const int ivtv_stream_map[] = { |
23 | IVTV_ENC_STREAM_TYPE_MPG, |
24 | IVTV_ENC_STREAM_TYPE_YUV, |
25 | IVTV_ENC_STREAM_TYPE_PCM, |
26 | IVTV_ENC_STREAM_TYPE_VBI, |
27 | }; |
28 | |
29 | static void ivtv_pcm_work_handler(struct ivtv *itv) |
30 | { |
31 | struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM]; |
32 | struct ivtv_buffer *buf; |
33 | |
34 | /* Pass the PCM data to ivtv-alsa */ |
35 | |
36 | while (1) { |
37 | /* |
38 | * Users should not be using both the ALSA and V4L2 PCM audio |
39 | * capture interfaces at the same time. If the user is doing |
		 * this, there may be a buffer in q_io to grab, use, and put
41 | * back in rotation. |
42 | */ |
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf == NULL)
			buf = ivtv_dequeue(s, &s->q_full);
46 | if (buf == NULL) |
47 | break; |
48 | |
49 | if (buf->readpos < buf->bytesused) |
50 | itv->pcm_announce_callback(itv->alsa, |
51 | (u8 *)(buf->buf + buf->readpos), |
52 | (size_t)(buf->bytesused - buf->readpos)); |
53 | |
		ivtv_enqueue(s, buf, &s->q_free);
55 | } |
56 | } |
57 | |
58 | static void ivtv_pio_work_handler(struct ivtv *itv) |
59 | { |
60 | struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream]; |
61 | struct ivtv_buffer *buf; |
62 | int i = 0; |
63 | |
	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
65 | if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS || |
66 | s->vdev.v4l2_dev == NULL || !ivtv_use_pio(s)) { |
67 | itv->cur_pio_stream = -1; |
68 | /* trigger PIO complete user interrupt */ |
69 | write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); |
70 | return; |
71 | } |
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
73 | list_for_each_entry(buf, &s->q_dma.list, list) { |
74 | u32 size = s->sg_processing[i].size & 0x3ffff; |
75 | |
76 | /* Copy the data from the card to the buffer */ |
77 | if (s->type == IVTV_DEC_STREAM_TYPE_VBI) { |
78 | memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size); |
79 | } |
80 | else { |
81 | memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size); |
82 | } |
83 | i++; |
84 | if (i == s->sg_processing_size) |
85 | break; |
86 | } |
87 | write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); |
88 | } |
89 | |
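/*
 * Deferred half of the interrupt handler. ivtv_irq_handler() only sets
 * the IVTV_F_I_WORK_HANDLER_* bits and queues this work on the per-card
 * kthread worker; the heavy lifting (PIO copies, VBI parsing, YUV
 * register updates and the PCM hand-off to ivtv-alsa) happens here,
 * outside of hard interrupt context.
 */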
90 | void ivtv_irq_work_handler(struct kthread_work *work) |
91 | { |
92 | struct ivtv *itv = container_of(work, struct ivtv, irq_work); |
93 | |
	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
		ivtv_pcm_work_handler(itv);
105 | } |
106 | |
/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.

   Returns 0 when the scatter list has been filled in (the caller then
   schedules the DMA or PIO transfer) and -1 when nothing could be queued.
 */
111 | static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA]) |
112 | { |
113 | struct ivtv *itv = s->itv; |
114 | struct ivtv_buffer *buf; |
115 | u32 bytes_needed = 0; |
116 | u32 offset, size; |
117 | u32 UVoffset = 0, UVsize = 0; |
118 | int skip_bufs = s->q_predma.buffers; |
119 | int idx = s->sg_pending_size; |
120 | int rc; |
121 | |
122 | /* sanity checks */ |
123 | if (s->vdev.v4l2_dev == NULL) { |
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
125 | return -1; |
126 | } |
127 | if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { |
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
129 | return -1; |
130 | } |
131 | |
132 | /* determine offset, size and PTS for the various streams */ |
133 | switch (s->type) { |
134 | case IVTV_ENC_STREAM_TYPE_MPG: |
135 | offset = data[1]; |
136 | size = data[2]; |
137 | s->pending_pts = 0; |
138 | break; |
139 | |
140 | case IVTV_ENC_STREAM_TYPE_YUV: |
141 | offset = data[1]; |
142 | size = data[2]; |
143 | UVoffset = data[3]; |
144 | UVsize = data[4]; |
145 | s->pending_pts = ((u64) data[5] << 32) | data[6]; |
146 | break; |
147 | |
148 | case IVTV_ENC_STREAM_TYPE_PCM: |
149 | offset = data[1] + 12; |
150 | size = data[2] - 12; |
151 | s->pending_pts = read_dec(offset - 8) | |
152 | ((u64)(read_dec(offset - 12)) << 32); |
153 | if (itv->has_cx23415) |
154 | offset += IVTV_DECODER_OFFSET; |
155 | break; |
156 | |
157 | case IVTV_ENC_STREAM_TYPE_VBI: |
158 | size = itv->vbi.enc_size * itv->vbi.fpi; |
159 | offset = read_enc(itv->vbi.enc_start - 4) + 12; |
160 | if (offset == 12) { |
			IVTV_DEBUG_INFO("VBI offset == 0\n");
162 | return -1; |
163 | } |
164 | s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32); |
165 | break; |
166 | |
167 | case IVTV_DEC_STREAM_TYPE_VBI: |
168 | size = read_dec(itv->vbi.dec_start + 4) + 8; |
169 | offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start; |
170 | s->pending_pts = 0; |
171 | offset += IVTV_DECODER_OFFSET; |
172 | break; |
173 | default: |
174 | /* shouldn't happen */ |
175 | return -1; |
176 | } |
177 | |
178 | /* if this is the start of the DMA then fill in the magic cookie */ |
179 | if (s->sg_pending_size == 0 && ivtv_use_dma(s)) { |
180 | if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || |
181 | s->type == IVTV_DEC_STREAM_TYPE_VBI)) { |
182 | s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET); |
183 | write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET); |
184 | } |
185 | else { |
186 | s->pending_backup = read_enc(offset); |
187 | write_enc_sync(DMA_MAGIC_COOKIE, offset); |
188 | } |
189 | s->pending_offset = offset; |
190 | } |
191 | |
192 | bytes_needed = size; |
193 | if (s->type == IVTV_ENC_STREAM_TYPE_YUV) { |
194 | /* The size for the Y samples needs to be rounded upwards to a |
195 | multiple of the buf_size. The UV samples then start in the |
196 | next buffer. */ |
197 | bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size); |
198 | bytes_needed += UVsize; |
199 | } |
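	/*
	 * Purely illustrative numbers for the rounding above: with a
	 * buf_size of 0x20000, a Y plane of 0x21000 bytes is rounded up to
	 * 0x40000 and UVsize is then added on top, so the UV samples always
	 * begin in a fresh buffer as described in the comment above.
	 */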
200 | |
	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
213 | } |
214 | s->buffers_stolen = rc; |
215 | |
216 | /* got the buffers, now fill in sg_pending */ |
217 | buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list); |
218 | memset(buf->buf, 0, 128); |
219 | list_for_each_entry(buf, &s->q_predma.list, list) { |
220 | if (skip_bufs-- > 0) |
221 | continue; |
222 | s->sg_pending[idx].dst = buf->dma_handle; |
223 | s->sg_pending[idx].src = offset; |
224 | s->sg_pending[idx].size = s->buf_size; |
225 | buf->bytesused = min(size, s->buf_size); |
226 | buf->dma_xfer_cnt = s->dma_xfer_cnt; |
227 | |
228 | s->q_predma.bytesused += buf->bytesused; |
229 | size -= buf->bytesused; |
230 | offset += s->buf_size; |
231 | |
232 | /* Sync SG buffers */ |
233 | ivtv_buf_sync_for_device(s, buf); |
234 | |
235 | if (size == 0) { /* YUV */ |
236 | /* process the UV section */ |
237 | offset = UVoffset; |
238 | size = UVsize; |
239 | } |
240 | idx++; |
241 | } |
242 | s->sg_pending_size = idx; |
243 | return 0; |
244 | } |
245 | |
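/*
 * Post-process a completed transfer: locate the DMA_MAGIC_COOKIE to find
 * where the data really starts, restore the dword the cookie overwrote,
 * flag MPG/VBI buffers for byte swapping, run decoder VBI reinsertion
 * buffers through the VBI parser, and finally move the buffers to q_full
 * (or back to q_free when nobody is reading) and wake up any readers.
 */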
246 | static void dma_post(struct ivtv_stream *s) |
247 | { |
248 | struct ivtv *itv = s->itv; |
249 | struct ivtv_buffer *buf = NULL; |
250 | struct list_head *p; |
251 | u32 offset; |
252 | __le32 *u32buf; |
253 | int x = 0; |
254 | |
	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
256 | s->name, s->dma_offset); |
257 | list_for_each(p, &s->q_dma.list) { |
258 | buf = list_entry(p, struct ivtv_buffer, list); |
259 | u32buf = (__le32 *)buf->buf; |
260 | |
261 | /* Sync Buffer */ |
262 | ivtv_buf_sync_for_cpu(s, buf); |
263 | |
264 | if (x == 0 && ivtv_use_dma(s)) { |
265 | offset = s->dma_last_offset; |
266 | if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE) |
267 | { |
268 | for (offset = 0; offset < 64; offset++) |
269 | if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE) |
270 | break; |
271 | offset *= 4; |
272 | if (offset == 256) { |
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
274 | offset = s->dma_last_offset; |
275 | } |
276 | if (s->dma_last_offset != offset) |
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
278 | s->dma_last_offset = offset; |
279 | } |
280 | if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || |
281 | s->type == IVTV_DEC_STREAM_TYPE_VBI)) { |
282 | write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET); |
283 | } |
284 | else { |
285 | write_enc_sync(0, s->dma_offset); |
286 | } |
287 | if (offset) { |
288 | buf->bytesused -= offset; |
289 | memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset); |
290 | } |
291 | *u32buf = cpu_to_le32(s->dma_backup); |
292 | } |
293 | x++; |
294 | /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */ |
295 | if (s->type == IVTV_ENC_STREAM_TYPE_MPG || |
296 | s->type == IVTV_ENC_STREAM_TYPE_VBI) |
297 | buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP; |
298 | } |
299 | if (buf) |
300 | buf->bytesused += s->dma_last_offset; |
301 | if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) { |
302 | list_for_each_entry(buf, &s->q_dma.list, list) { |
303 | /* Parse and Groom VBI Data */ |
304 | s->q_dma.bytesused -= buf->bytesused; |
			ivtv_process_vbi_data(itv, buf, 0, s->type);
306 | s->q_dma.bytesused += buf->bytesused; |
307 | } |
308 | if (s->fh == NULL) { |
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
310 | return; |
311 | } |
312 | } |
313 | |
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
315 | |
316 | if (s->type == IVTV_ENC_STREAM_TYPE_PCM && |
317 | itv->pcm_announce_callback != NULL) { |
318 | /* |
319 | * Set up the work handler to pass the data to ivtv-alsa. |
320 | * |
321 | * We just use q_full and let the work handler race with users |
322 | * making ivtv-fileops.c calls on the PCM device node. |
323 | * |
324 | * Users should not be using both the ALSA and V4L2 PCM audio |
325 | * capture interfaces at the same time. If the user does this, |
326 | * fragments of data will just go out each interface as they |
327 | * race for PCM data. |
328 | */ |
		set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
331 | } |
332 | |
333 | if (s->fh) |
334 | wake_up(&s->waitq); |
335 | } |
336 | |
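/*
 * Build the scatter list that pushes the q_predma buffers into decoder
 * memory at 'offset'. For the YUV stream a blanking block may be inserted
 * first, and once y_size bytes of luma have been written the destination
 * jumps to offset + IVTV_YUV_BUFFER_UV_OFFSET for the chroma data. When
 * 'lock' is set, dma_reg_lock is taken here before the transfer is
 * started (or marked pending if the engine is busy).
 */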
337 | void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock) |
338 | { |
339 | struct ivtv *itv = s->itv; |
340 | struct yuv_playback_info *yi = &itv->yuv_info; |
341 | u8 frame = yi->draw_frame; |
342 | struct yuv_frame_info *f = &yi->new_frame_info[frame]; |
343 | struct ivtv_buffer *buf; |
344 | u32 y_size = 720 * ((f->src_h + 31) & ~31); |
345 | u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET; |
346 | int y_done = 0; |
347 | int bytes_written = 0; |
348 | int idx = 0; |
349 | |
	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
351 | |
352 | /* Insert buffer block for YUV if needed */ |
353 | if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) { |
354 | if (yi->blanking_dmaptr) { |
355 | s->sg_pending[idx].src = yi->blanking_dmaptr; |
356 | s->sg_pending[idx].dst = offset; |
357 | s->sg_pending[idx].size = 720 * 16; |
358 | } |
359 | offset += 720 * 16; |
360 | idx++; |
361 | } |
362 | |
363 | list_for_each_entry(buf, &s->q_predma.list, list) { |
364 | /* YUV UV Offset from Y Buffer */ |
365 | if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && |
366 | (bytes_written + buf->bytesused) >= y_size) { |
367 | s->sg_pending[idx].src = buf->dma_handle; |
368 | s->sg_pending[idx].dst = offset; |
369 | s->sg_pending[idx].size = y_size - bytes_written; |
370 | offset = uv_offset; |
371 | if (s->sg_pending[idx].size != buf->bytesused) { |
372 | idx++; |
373 | s->sg_pending[idx].src = |
374 | buf->dma_handle + s->sg_pending[idx - 1].size; |
375 | s->sg_pending[idx].dst = offset; |
376 | s->sg_pending[idx].size = |
377 | buf->bytesused - s->sg_pending[idx - 1].size; |
378 | offset += s->sg_pending[idx].size; |
379 | } |
380 | y_done = 1; |
381 | } else { |
382 | s->sg_pending[idx].src = buf->dma_handle; |
383 | s->sg_pending[idx].dst = offset; |
384 | s->sg_pending[idx].size = buf->bytesused; |
385 | offset += buf->bytesused; |
386 | } |
387 | bytes_written += buf->bytesused; |
388 | |
389 | /* Sync SG buffers */ |
390 | ivtv_buf_sync_for_device(s, buf); |
391 | idx++; |
392 | } |
393 | s->sg_pending_size = idx; |
394 | |
395 | /* Sync Hardware SG List of buffers */ |
396 | ivtv_stream_sync_for_device(s); |
397 | if (lock) { |
398 | unsigned long flags = 0; |
399 | |
400 | spin_lock_irqsave(&itv->dma_reg_lock, flags); |
401 | if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) |
402 | ivtv_dma_dec_start(s); |
403 | else |
			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
	} else {
		if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
			ivtv_dma_dec_start(s);
		else
			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
411 | } |
412 | } |
413 | |
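/*
 * Program a single host SG element into the hardware SG list and kick the
 * encoder bus master by setting bit 0x02 of IVTV_REG_DMAXFER (the decoder
 * variant below uses IVTV_REG_DECDMAADDR and bit 0x01). The 300 ms
 * dma_timer acts as a watchdog for transfers that never complete; see
 * ivtv_unfinished_dma().
 */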
414 | static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s) |
415 | { |
416 | struct ivtv *itv = s->itv; |
417 | |
418 | s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); |
419 | s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); |
420 | s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); |
421 | s->sg_processed++; |
422 | /* Sync Hardware SG List of buffers */ |
423 | ivtv_stream_sync_for_device(s); |
424 | write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR); |
425 | write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER); |
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
428 | } |
429 | |
430 | static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s) |
431 | { |
432 | struct ivtv *itv = s->itv; |
433 | |
434 | s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); |
435 | s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); |
436 | s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); |
437 | s->sg_processed++; |
438 | /* Sync Hardware SG List of buffers */ |
439 | ivtv_stream_sync_for_device(s); |
440 | write_reg(s->sg_handle, IVTV_REG_DECDMAADDR); |
441 | write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); |
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
444 | } |
445 | |
446 | /* start the encoder DMA */ |
447 | static void ivtv_dma_enc_start(struct ivtv_stream *s) |
448 | { |
449 | struct ivtv *itv = s->itv; |
450 | struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; |
451 | int i; |
452 | |
	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
454 | |
455 | if (s->q_predma.bytesused) |
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
457 | |
458 | if (ivtv_use_dma(s)) |
459 | s->sg_pending[s->sg_pending_size - 1].size += 256; |
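	/*
	 * The extra 256 bytes added above appear to cover the case where the
	 * data turns out to start up to 256 bytes (the cookie search window
	 * in dma_post()) past the reported offset, so the tail of the frame
	 * is not lost when dma_post() shifts the data down.
	 */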
460 | |
461 | /* If this is an MPEG stream, and VBI data is also pending, then append the |
462 | VBI DMA to the MPEG DMA and transfer both sets of data at once. |
463 | |
464 | VBI DMA is a second class citizen compared to MPEG and mixing them together |
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
466 | MPEG DMA, thus effectively dropping an MPEG frame). So instead we make |
467 | sure we only use the MPEG DMA to transfer the VBI DMA if both are in |
468 | use. This way no conflicts occur. */ |
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
	    s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
483 | } |
484 | |
485 | s->dma_xfer_cnt++; |
486 | memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); |
487 | s->sg_processing_size = s->sg_pending_size; |
488 | s->sg_pending_size = 0; |
489 | s->sg_processed = 0; |
490 | s->dma_offset = s->pending_offset; |
491 | s->dma_backup = s->pending_backup; |
492 | s->dma_pts = s->pending_pts; |
493 | |
494 | if (ivtv_use_pio(s)) { |
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
498 | itv->cur_pio_stream = s->type; |
499 | } |
500 | else { |
501 | itv->dma_retries = 0; |
502 | ivtv_dma_enc_start_xfer(s); |
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
504 | itv->cur_dma_stream = s->type; |
505 | } |
506 | } |
507 | |
508 | static void ivtv_dma_dec_start(struct ivtv_stream *s) |
509 | { |
510 | struct ivtv *itv = s->itv; |
511 | |
512 | if (s->q_predma.bytesused) |
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
514 | s->dma_xfer_cnt++; |
515 | memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); |
516 | s->sg_processing_size = s->sg_pending_size; |
517 | s->sg_pending_size = 0; |
518 | s->sg_processed = 0; |
519 | |
	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
524 | itv->cur_dma_stream = s->type; |
525 | } |
526 | |
527 | static void ivtv_irq_dma_read(struct ivtv *itv) |
528 | { |
529 | struct ivtv_stream *s = NULL; |
530 | struct ivtv_buffer *buf; |
531 | int hw_stream_type = 0; |
532 | |
	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);
536 | |
537 | if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) |
538 | return; |
539 | |
540 | if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { |
541 | s = &itv->streams[itv->cur_dma_stream]; |
542 | ivtv_stream_sync_for_cpu(s); |
543 | |
544 | if (read_reg(IVTV_REG_DMASTATUS) & 0x14) { |
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
546 | read_reg(IVTV_REG_DMASTATUS), |
547 | s->sg_processed, s->sg_processing_size, itv->dma_retries); |
548 | write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); |
549 | if (itv->dma_retries == 3) { |
550 | /* Too many retries, give up on this frame */ |
551 | itv->dma_retries = 0; |
552 | s->sg_processed = s->sg_processing_size; |
553 | } |
554 | else { |
555 | /* Retry, starting with the first xfer segment. |
556 | Just retrying the current segment is not sufficient. */ |
557 | s->sg_processed = 0; |
558 | itv->dma_retries++; |
559 | } |
560 | } |
561 | if (s->sg_processed < s->sg_processing_size) { |
562 | /* DMA next buffer */ |
563 | ivtv_dma_dec_start_xfer(s); |
564 | return; |
565 | } |
566 | if (s->type == IVTV_DEC_STREAM_TYPE_YUV) |
567 | hw_stream_type = 2; |
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
569 | |
		/* For some reason we must kick the firmware, like in PIO mode.
		   I think this tells the firmware we are done and the size of
		   the xfer so it can calculate what we need next.
		   I think we could do this part ourselves, but we would have
		   to fully calculate the xfer info ourselves and not use
		   interrupts. */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);
578 | |
579 | /* Free last DMA call */ |
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
583 | } |
584 | wake_up(&s->waitq); |
585 | } |
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
588 | itv->cur_dma_stream = -1; |
589 | wake_up(&itv->dma_waitq); |
590 | } |
591 | |
592 | static void ivtv_irq_enc_dma_complete(struct ivtv *itv) |
593 | { |
594 | u32 data[CX2341X_MBOX_MAX_DATA]; |
595 | struct ivtv_stream *s; |
596 | |
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);
601 | |
602 | if (itv->cur_dma_stream < 0) |
603 | return; |
604 | |
605 | s = &itv->streams[itv->cur_dma_stream]; |
606 | ivtv_stream_sync_for_cpu(s); |
607 | |
608 | if (data[0] & 0x18) { |
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
610 | s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries); |
611 | write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); |
612 | if (itv->dma_retries == 3) { |
613 | /* Too many retries, give up on this frame */ |
614 | itv->dma_retries = 0; |
615 | s->sg_processed = s->sg_processing_size; |
616 | } |
617 | else { |
618 | /* Retry, starting with the first xfer segment. |
619 | Just retrying the current segment is not sufficient. */ |
620 | s->sg_processed = 0; |
621 | itv->dma_retries++; |
622 | } |
623 | } |
624 | if (s->sg_processed < s->sg_processing_size) { |
625 | /* DMA next buffer */ |
626 | ivtv_dma_enc_start_xfer(s); |
627 | return; |
628 | } |
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
633 | s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; |
634 | dma_post(s); |
635 | } |
636 | s->sg_processing_size = 0; |
637 | s->sg_processed = 0; |
638 | wake_up(&itv->dma_waitq); |
639 | } |
640 | |
641 | static void ivtv_irq_enc_pio_complete(struct ivtv *itv) |
642 | { |
643 | struct ivtv_stream *s; |
644 | |
645 | if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) { |
646 | itv->cur_pio_stream = -1; |
647 | return; |
648 | } |
649 | s = &itv->streams[itv->cur_pio_stream]; |
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
652 | itv->cur_pio_stream = -1; |
653 | dma_post(s); |
654 | if (s->type == IVTV_ENC_STREAM_TYPE_MPG) |
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
662 | s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; |
663 | dma_post(s); |
664 | } |
665 | wake_up(&itv->dma_waitq); |
666 | } |
667 | |
668 | static void ivtv_irq_dma_err(struct ivtv *itv) |
669 | { |
670 | u32 data[CX2341X_MBOX_MAX_DATA]; |
671 | u32 status; |
672 | |
	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
678 | status, itv->cur_dma_stream); |
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation does not indicate
	 * completion.  We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status after the
	 * DMA engine has completed will cause the DMA engine to stop working.
	 */
688 | status &= 0x3; |
689 | if (status == 0x3) |
690 | write_reg(status, IVTV_REG_DMASTATUS); |
691 | |
692 | if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && |
693 | itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { |
694 | struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; |
695 | |
696 | if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { |
697 | /* retry */ |
698 | /* |
699 | * FIXME - handle cases of DMA error similar to |
700 | * encoder below, except conditioned on status & 0x1 |
701 | */ |
702 | ivtv_dma_dec_start(s); |
703 | return; |
704 | } else { |
705 | if ((status & 0x2) == 0) { |
706 | /* |
707 | * CX2341x Bus Master DMA write is ongoing. |
708 | * Reset the timer and let it complete. |
709 | */ |
				itv->dma_timer.expires =
					jiffies + msecs_to_jiffies(600);
				add_timer(&itv->dma_timer);
713 | return; |
714 | } |
715 | |
716 | if (itv->dma_retries < 3) { |
717 | /* |
718 | * CX2341x Bus Master DMA write has ended. |
719 | * Retry the write, starting with the first |
720 | * xfer segment. Just retrying the current |
721 | * segment is not sufficient. |
722 | */ |
723 | s->sg_processed = 0; |
724 | itv->dma_retries++; |
725 | ivtv_dma_enc_start_xfer(s); |
726 | return; |
727 | } |
728 | /* Too many retries, give up on this one */ |
729 | } |
730 | |
731 | } |
732 | if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { |
733 | ivtv_udma_start(itv); |
734 | return; |
735 | } |
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
738 | itv->cur_dma_stream = -1; |
739 | wake_up(&itv->dma_waitq); |
740 | } |
741 | |
742 | static void ivtv_irq_enc_start_cap(struct ivtv *itv) |
743 | { |
744 | u32 data[CX2341X_MBOX_MAX_DATA]; |
745 | struct ivtv_stream *s; |
746 | |
747 | /* Get DMA destination and size arguments from card */ |
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
750 | |
751 | if (data[0] > 2 || data[1] == 0 || data[2] == 0) { |
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
753 | data[0], data[1], data[2]); |
754 | return; |
755 | } |
756 | s = &itv->streams[ivtv_stream_map[data[0]]]; |
757 | if (!stream_enc_dma_append(s, data)) { |
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
759 | } |
760 | } |
761 | |
762 | static void ivtv_irq_enc_vbi_cap(struct ivtv *itv) |
763 | { |
764 | u32 data[CX2341X_MBOX_MAX_DATA]; |
765 | struct ivtv_stream *s; |
766 | |
	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
768 | s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; |
769 | |
770 | if (!stream_enc_dma_append(s, data)) |
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
772 | } |
773 | |
774 | static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv) |
775 | { |
776 | u32 data[CX2341X_MBOX_MAX_DATA]; |
777 | struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI]; |
778 | |
	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
783 | } |
784 | } |
785 | |
786 | static void ivtv_irq_dec_data_req(struct ivtv *itv) |
787 | { |
788 | u32 data[CX2341X_MBOX_MAX_DATA]; |
789 | struct ivtv_stream *s; |
790 | |
791 | /* YUV or MPG */ |
792 | |
793 | if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) { |
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
795 | itv->dma_data_req_size = |
796 | 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31); |
797 | itv->dma_data_req_offset = data[1]; |
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
799 | ivtv_yuv_frame_complete(itv); |
800 | s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; |
801 | } |
802 | else { |
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
804 | itv->dma_data_req_size = min_t(u32, data[2], 0x10000); |
805 | itv->dma_data_req_offset = data[1]; |
806 | s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; |
807 | } |
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
809 | itv->dma_data_req_offset, itv->dma_data_req_size); |
810 | if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) { |
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
812 | } |
813 | else { |
814 | if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) |
815 | ivtv_yuv_setup_stream_frame(itv); |
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
819 | } |
820 | } |
821 | |
822 | static void ivtv_irq_vsync(struct ivtv *itv) |
823 | { |
824 | /* The vsync interrupt is unusual in that it won't clear until |
825 | * the end of the first line for the current field, at which |
826 | * point it clears itself. This can result in repeated vsync |
827 | * interrupts, or a missed vsync. Read some of the registers |
828 | * to determine the line being displayed and ensure we handle |
829 | * one vsync per frame. |
830 | */ |
831 | unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1; |
832 | struct yuv_playback_info *yi = &itv->yuv_info; |
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
834 | struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame]; |
835 | |
	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
837 | |
838 | if (((frame ^ f->sync_field) == 0 && |
839 | ((itv->last_vsync_field & 1) ^ f->sync_field)) || |
840 | (frame != (itv->last_vsync_field & 1) && !f->interlaced)) { |
841 | int next_dma_frame = last_dma_frame; |
842 | |
843 | if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) { |
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
845 | write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c); |
846 | write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830); |
847 | write_reg(yuv_offset[next_dma_frame] >> 4, 0x834); |
848 | write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838); |
849 | next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS; |
				atomic_set(&yi->next_dma_frame, next_dma_frame);
851 | yi->fields_lapsed = -1; |
852 | yi->running = 1; |
853 | } |
854 | } |
855 | } |
856 | if (frame != (itv->last_vsync_field & 1)) { |
857 | static const struct v4l2_event evtop = { |
858 | .type = V4L2_EVENT_VSYNC, |
859 | .u.vsync.field = V4L2_FIELD_TOP, |
860 | }; |
861 | static const struct v4l2_event evbottom = { |
862 | .type = V4L2_EVENT_VSYNC, |
863 | .u.vsync.field = V4L2_FIELD_BOTTOM, |
864 | }; |
865 | struct ivtv_stream *s = ivtv_get_output_stream(itv); |
866 | |
867 | itv->last_vsync_field += 1; |
868 | if (frame == 0) { |
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
877 | wake_up(&itv->event_waitq); |
878 | if (s) |
879 | wake_up(&s->waitq); |
880 | } |
881 | if (s && s->vdev.v4l2_dev) |
			v4l2_event_queue(&s->vdev, frame ? &evtop : &evbottom);
883 | wake_up(&itv->vsync_waitq); |
884 | |
885 | /* Send VBI to saa7127 */ |
886 | if (frame && (itv->output_mode == OUT_PASSTHROUGH || |
887 | test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) || |
888 | test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) || |
889 | test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) { |
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
892 | } |
893 | |
894 | /* Check if we need to update the yuv registers */ |
895 | if (yi->running && (yi->yuv_forced_update || f->update)) { |
896 | if (!f->update) { |
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						1) % IVTV_YUV_BUFFERS;
900 | f = &yi->new_frame_info[last_dma_frame]; |
901 | } |
902 | |
903 | if (f->src_w) { |
904 | yi->update_frame = last_dma_frame; |
905 | f->update = 0; |
906 | yi->yuv_forced_update = 0; |
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
909 | } |
910 | } |
911 | |
912 | yi->fields_lapsed++; |
913 | } |
914 | } |
915 | |
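/*
 * Any of the interrupts in this mask may leave a stream with
 * IVTV_F_S_DMA_PENDING or IVTV_F_S_PIO_PENDING set, so ivtv_irq_handler()
 * runs its round-robin scheduling passes whenever one of them fires.
 */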
916 | #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT) |
917 | |
918 | irqreturn_t ivtv_irq_handler(int irq, void *dev_id) |
919 | { |
920 | struct ivtv *itv = (struct ivtv *)dev_id; |
921 | u32 combo; |
922 | u32 stat; |
923 | int i; |
924 | u8 vsync_force = 0; |
925 | |
	spin_lock(&itv->dma_reg_lock);
927 | /* get contents of irq status register */ |
928 | stat = read_reg(IVTV_REG_IRQSTATUS); |
929 | |
930 | combo = ~itv->irqmask & stat; |
931 | |
932 | /* Clear out IRQ */ |
933 | if (combo) write_reg(combo, IVTV_REG_IRQSTATUS); |
934 | |
935 | if (0 == combo) { |
936 | /* The vsync interrupt is unusual and clears itself. If we |
937 | * took too long, we may have missed it. Do some checks |
938 | */ |
939 | if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { |
940 | /* vsync is enabled, see if we're in a new field */ |
941 | if ((itv->last_vsync_field & 1) != |
942 | (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) { |
943 | /* New field, looks like we missed it */ |
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
945 | read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16); |
946 | vsync_force = 1; |
947 | } |
948 | } |
949 | |
950 | if (!vsync_force) { |
951 | /* No Vsync expected, wasn't for us */ |
			spin_unlock(&itv->dma_reg_lock);
953 | return IRQ_NONE; |
954 | } |
955 | } |
956 | |
957 | /* Exclude interrupts noted below from the output, otherwise the log is flooded with |
958 | these messages */ |
959 | if (combo & ~0xff6d0400) |
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
961 | |
962 | if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) { |
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
964 | } |
965 | |
966 | if (combo & IVTV_IRQ_DMA_READ) { |
967 | ivtv_irq_dma_read(itv); |
968 | } |
969 | |
970 | if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) { |
971 | ivtv_irq_enc_dma_complete(itv); |
972 | } |
973 | |
974 | if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) { |
975 | ivtv_irq_enc_pio_complete(itv); |
976 | } |
977 | |
978 | if (combo & IVTV_IRQ_DMA_ERR) { |
979 | ivtv_irq_dma_err(itv); |
980 | } |
981 | |
982 | if (combo & IVTV_IRQ_ENC_START_CAP) { |
983 | ivtv_irq_enc_start_cap(itv); |
984 | } |
985 | |
986 | if (combo & IVTV_IRQ_ENC_VBI_CAP) { |
987 | ivtv_irq_enc_vbi_cap(itv); |
988 | } |
989 | |
990 | if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) { |
991 | ivtv_irq_dec_vbi_reinsert(itv); |
992 | } |
993 | |
994 | if (combo & IVTV_IRQ_ENC_EOS) { |
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
997 | wake_up(&itv->eos_waitq); |
998 | } |
999 | |
1000 | if (combo & IVTV_IRQ_DEC_DATA_REQ) { |
1001 | ivtv_irq_dec_data_req(itv); |
1002 | } |
1003 | |
1004 | /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */ |
1005 | if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { |
1006 | ivtv_irq_vsync(itv); |
1007 | } |
1008 | |
1009 | if (combo & IVTV_IRQ_ENC_VIM_RST) { |
		IVTV_DEBUG_IRQ("VIM RST\n");
1011 | /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */ |
1012 | } |
1013 | |
1014 | if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) { |
		IVTV_DEBUG_INFO("Stereo mode changed\n");
1016 | } |
1017 | |
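	/*
	 * Start the next pending transfer.  irq_rr_idx rotates the starting
	 * point of the scan so one busy stream cannot starve the others;
	 * decoder streams use ivtv_dma_dec_start(), encoder streams use
	 * ivtv_dma_enc_start(), and a pending user-space DMA gets its turn
	 * when no stream needs the engine.
	 */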
1018 | if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) { |
1019 | itv->irq_rr_idx++; |
1020 | for (i = 0; i < IVTV_MAX_STREAMS; i++) { |
1021 | int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; |
1022 | struct ivtv_stream *s = &itv->streams[idx]; |
1023 | |
			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
1025 | continue; |
1026 | if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) |
1027 | ivtv_dma_dec_start(s); |
1028 | else |
1029 | ivtv_dma_enc_start(s); |
1030 | break; |
1031 | } |
1032 | |
1033 | if (i == IVTV_MAX_STREAMS && |
1034 | test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) |
1035 | ivtv_udma_start(itv); |
1036 | } |
1037 | |
1038 | if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) { |
1039 | itv->irq_rr_idx++; |
1040 | for (i = 0; i < IVTV_MAX_STREAMS; i++) { |
1041 | int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; |
1042 | struct ivtv_stream *s = &itv->streams[idx]; |
1043 | |
			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
1045 | continue; |
1046 | if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG) |
1047 | ivtv_dma_enc_start(s); |
1048 | break; |
1049 | } |
1050 | } |
1051 | |
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		kthread_queue_work(&itv->irq_worker, &itv->irq_work);
1054 | } |
1055 | |
	spin_unlock(&itv->dma_reg_lock);
1057 | |
1058 | /* If we've just handled a 'forced' vsync, it's safest to say it |
1059 | * wasn't ours. Another device may have triggered it at just |
1060 | * the right time. |
1061 | */ |
1062 | return vsync_force ? IRQ_NONE : IRQ_HANDLED; |
1063 | } |
1064 | |
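/*
 * DMA watchdog.  Armed for 300 ms by the start_xfer helpers (600 ms when
 * the error path lets an in-flight write finish).  If it fires while
 * IVTV_F_I_DMA is still set, the transfer is abandoned: the status
 * register is cleared and everyone waiting on the engine is woken up.
 */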
1065 | void ivtv_unfinished_dma(struct timer_list *t) |
1066 | { |
1067 | struct ivtv *itv = from_timer(itv, t, dma_timer); |
1068 | |
1069 | if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) |
1070 | return; |
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
1072 | |
1073 | write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); |
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
1076 | itv->cur_dma_stream = -1; |
1077 | wake_up(&itv->dma_waitq); |
1078 | } |
1079 | |