// SPDX-License-Identifier: GPL-2.0-or-later
/*
    User DMA

    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"

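/*
 * Work out which user pages a [first, first + size) byte range touches.
 * 'offset' is the byte offset into the first page, 'tail' the number of
 * bytes used in the last page and 'page_count' the total number of pages.
 *
 * For example, with 4 KiB pages, first = 0x1000f00 and size = 0x100 stays
 * within one page (offset = 0xf00, tail = 0x100, page_count = 1), while
 * size = 0x300 crosses into the next page (page_count = 2, tail = 0x200,
 * and the first page contributes PAGE_SIZE - offset = 0x100 bytes).
 */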
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first + size - 1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	if (dma_page->page_count == 1)
		dma_page->tail -= dma_page->offset;
}

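/*
 * Build scatterlist entries for dma_page->page_count pinned user pages,
 * starting at dma->map[map_offset].  Highmem pages are not used directly:
 * their contents are copied into lowmem bounce pages (allocated on first
 * use) under kmap_atomic(), and the bounce page goes into the list instead.
 * Returns the map index just past the pages added, or a negative value if
 * a bounce page could not be allocated.
 */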
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill SG Array with new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset]) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
			kunmap_atomic(src);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		}
		else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}

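/*
 * Translate the DMA-mapped scatterlist into the device's own SG descriptor
 * array.  Each entry holds a little-endian transfer size, bus source
 * address and card destination offset.  The destination advances with each
 * chunk; once 'split' bytes have been described it switches to
 * buffer_offset_2.  Callers transferring a single contiguous buffer pass
 * split = -1 so the switch never triggers.
 */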
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}

/* User DMA Buffers */
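/*
 * Set up the streaming DMA mapping for the SG descriptor array itself.
 * This happens only once (guarded by SG_handle); ivtv_udma_setup() and
 * ivtv_udma_unmap() just sync it for the device and the CPU around each
 * transfer.
 */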
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
						     itv->udma.SGarray,
						     sizeof(itv->udma.SGarray),
						     DMA_TO_DEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}

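/*
 * Prepare a DMA transfer from a user-space buffer to card address
 * ivtv_dest_addr: pin the user pages, build and map the scatterlist,
 * translate it into the device's SG descriptor array and tag the last
 * descriptor with the interrupt bit.  Returns the number of pinned pages
 * on success or a negative errno on failure.
 *
 * A typical caller sequence looks roughly like this (a sketch of how the
 * OSD/decoder write paths are expected to use these helpers):
 *
 *	ret = ivtv_udma_setup(itv, dest, userbuf, size);
 *	if (ret >= 0) {
 *		ivtv_udma_prepare(itv);
 *		... wait for the DMA-complete interrupt ...
 *		ivtv_udma_unmap(itv);
 *	}
 */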
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Pin user pages for DMA Xfer */
	err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
				      dma->map, 0);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		if (err >= 0) {
			unpin_user_pages(dma->map, err);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
				    dma->page_count, DMA_TO_DEVICE);
	if (!dma->SG_length) {
		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}

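/*
 * Tear down a transfer set up by ivtv_udma_setup(): unmap the scatterlist,
 * sync the SG descriptor array back for the CPU and unpin the user pages.
 */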
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
			     DMA_TO_DEVICE);
		dma->SG_length = 0;
	}
	/* sync DMA */
	ivtv_udma_sync_for_cpu(itv);

	unpin_user_pages(dma->map, dma->page_count);
	dma->page_count = 0;
}

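/*
 * Release everything on driver teardown: the descriptor array mapping, any
 * remaining scatterlist mapping and the bounce pages allocated for highmem
 * buffers.
 */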
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
				 sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
			     itv->udma.page_count, DMA_TO_DEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}

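/*
 * Kick off the transfer: point the DMA engine at the mapped SG descriptor
 * array, set the transfer-enable bit and mark a (user) DMA as in flight.
 * ivtv_udma_prepare() below calls this with dma_reg_lock held.
 */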
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}

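/*
 * Start the transfer immediately if no DMA is currently active, otherwise
 * flag it as pending so it can be started once the current DMA completes.
 */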
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}