// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

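/*
 * One cma_heap is created per CMA area at registration time; it simply
 * ties the exported dma_heap back to the CMA region it allocates from.
 */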
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

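/*
 * Per-buffer state: the CMA allocation itself (cma_pages), a flat page
 * array used to build per-attachment sg_tables and to back mmap/vmap,
 * the list of device attachments, and kernel-mapping refcounting.
 */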
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

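/*
 * Build a private sg_table over the buffer's pages for each attaching
 * device and track the attachment so the CPU-access callbacks can sync it.
 */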
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

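/*
 * Map/unmap the attachment's sg_table for DMA; the mapped flag gates the
 * cache maintenance done in the begin/end_cpu_access callbacks below.
 */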
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

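/*
 * Keep CPU and device views coherent: invalidate/flush any kernel vmap of
 * the buffer and sync every currently mapped attachment.
 */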
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

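/*
 * Userspace mappings are populated lazily, one page per fault. The bounds
 * check must be >=, since pgoff == pagecount is already past the buffer.
 */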
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

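/*
 * Kernel mappings are reference counted: the first vmap builds the
 * mapping, later callers just bump vmap_cnt and reuse buffer->vaddr.
 */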
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

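/*
 * Final release: warn (and clean up) if a kernel mapping leaked, then
 * return the pages to the CMA area.
 */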
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

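/*
 * Allocate a physically contiguous buffer from the CMA area, zero it,
 * build the flat page array, and export it as a dma-buf.
 */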
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

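/*
 * Register one heap for the default CMA area (if any) at module init;
 * it appears to userspace as a /dev/dma_heap/<cma-area-name> node.
 */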
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
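
/*
 * Illustrative userspace sketch (not part of this module): a client opens
 * the heap's character device and allocates with DMA_HEAP_IOCTL_ALLOC from
 * the <linux/dma-heap.h> UAPI. The heap node name below is an assumption;
 * it matches the CMA area's name on the running system.
 *
 *	int heap_fd = open("/dev/dma_heap/reserved", O_RDWR | O_CLOEXEC);
 *	struct dma_heap_allocation_data alloc = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);
 *
 * On success, alloc.fd is a dma-buf file descriptor backed by physically
 * contiguous CMA memory, ready to be mmap()ed or passed to a driver.
 */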