// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 */

#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
#include <uapi/linux/dma-heap.h>

#define DEVNAME "dma_heap"

#define NUM_HEAP_MINORS 128

/**
 * struct dma_heap - represents a dmabuf heap in the system
 * @name:	used for debugging/device-node name
 * @ops:	ops struct for this heap
 * @priv:	private per-subdriver data for the heap
 * @heap_devt:	heap device node
 * @list:	list head connecting to list of heaps
 * @heap_cdev:	heap char device
 *
 * Represents a heap of memory from which buffers can be made.
 */
struct dma_heap {
	const char *name;
	const struct dma_heap_ops *ops;
	void *priv;
	dev_t heap_devt;
	struct list_head list;
	struct cdev heap_cdev;
};

static LIST_HEAD(heap_list);
static DEFINE_MUTEX(heap_list_lock);
static dev_t dma_heap_devt;
static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);

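/*
 * dma_heap_buffer_alloc() - allocate a buffer of @len bytes from @heap
 * and return a new dma-buf fd for it. The length is rounded up to a
 * whole number of pages; on failure a negative errno is returned
 * instead of an fd.
 */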
static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
				 unsigned int fd_flags,
				 unsigned int heap_flags)
{
	struct dma_buf *dmabuf;
	int fd;

	/*
	 * Allocations from all heaps have to begin
	 * and end on page boundaries.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, fd_flags);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
	}
	return fd;
}

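/*
 * Device-node open: look up the heap registered for this char-device
 * minor and stash it in the file's private_data for later ioctls.
 */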
static int dma_heap_open(struct inode *inode, struct file *file)
{
	struct dma_heap *heap;

	heap = xa_load(&dma_heap_minors, iminor(inode));
	if (!heap) {
		pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
		return -ENODEV;
	}

	/* instance data as context */
	file->private_data = heap;
	nonseekable_open(inode, file);

	return 0;
}

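/*
 * DMA_HEAP_IOCTL_ALLOC handler: validate the request, allocate a
 * buffer from the opened heap and hand the resulting dma-buf fd back
 * to userspace in the fd field, which must be zero on entry.
 */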
static long dma_heap_ioctl_allocate(struct file *file, void *data)
{
	struct dma_heap_allocation_data *heap_allocation = data;
	struct dma_heap *heap = file->private_data;
	int fd;

	if (heap_allocation->fd)
		return -EINVAL;

	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
		return -EINVAL;

	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
		return -EINVAL;

	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
				   heap_allocation->fd_flags,
				   heap_allocation->heap_flags);
	if (fd < 0)
		return fd;

	heap_allocation->fd = fd;

	return 0;
}

static unsigned int dma_heap_ioctl_cmds[] = {
	DMA_HEAP_IOCTL_ALLOC,
};

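/*
 * Generic ioctl dispatcher: the command number is bounds-checked and
 * clamped with array_index_nospec() to avoid speculative out-of-bounds
 * indexing, then the argument is copied in using the user-supplied
 * size, with any remainder up to the kernel structure size zeroed.
 * This lets userspace built against a differently sized struct keep
 * working against this kernel.
 */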
static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
			   unsigned long arg)
{
	char stack_kdata[128];
	char *kdata = stack_kdata;
	unsigned int kcmd;
	unsigned int in_size, out_size, drv_size, ksize;
	int nr = _IOC_NR(ucmd);
	int ret = 0;

	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
		return -EINVAL;

	nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
	/* Get the kernel ioctl cmd that matches */
	kcmd = dma_heap_ioctl_cmds[nr];

	/* Figure out the delta between user cmd size and kernel cmd size */
	drv_size = _IOC_SIZE(kcmd);
	out_size = _IOC_SIZE(ucmd);
	in_size = out_size;
	if ((ucmd & kcmd & IOC_IN) == 0)
		in_size = 0;
	if ((ucmd & kcmd & IOC_OUT) == 0)
		out_size = 0;
	ksize = max(max(in_size, out_size), drv_size);

	/* If necessary, allocate buffer for ioctl argument */
	if (ksize > sizeof(stack_kdata)) {
		kdata = kmalloc(ksize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
		ret = -EFAULT;
		goto err;
	}

	/* zero out any difference between the kernel/user structure size */
	if (ksize > in_size)
		memset(kdata + in_size, 0, ksize - in_size);

	switch (kcmd) {
	case DMA_HEAP_IOCTL_ALLOC:
		ret = dma_heap_ioctl_allocate(file, kdata);
		break;
	default:
		ret = -ENOTTY;
		goto err;
	}

	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
		ret = -EFAULT;
err:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}

static const struct file_operations dma_heap_fops = {
	.owner = THIS_MODULE,
	.open = dma_heap_open,
	.unlocked_ioctl = dma_heap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = dma_heap_ioctl,
#endif
};

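/*
 * Illustrative sketch (not compiled here) of how userspace drives this
 * interface, assuming a heap registered under the name "system":
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd = 0,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *		.heap_flags = 0,
 *	};
 *	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *	int ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *
 * On success data.fd holds the new dma-buf file descriptor. The layout
 * of struct dma_heap_allocation_data and the ioctl number come from
 * uapi/linux/dma-heap.h included above.
 */
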
/**
 * dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
	return heap->priv;
}

/**
 * dma_heap_get_name() - get heap name
 * @heap: DMA-Heap to retrieve the name of
 *
 * Returns:
 * The char* for the heap name.
 */
const char *dma_heap_get_name(struct dma_heap *heap)
{
	return heap->name;
}

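/*
 * Illustrative sketch (not compiled here) of how a heap subdriver
 * registers itself with dma_heap_add(). The exact allocate() prototype
 * is declared in include/linux/dma-heap.h; my_heap_allocate and
 * my_heap_data are placeholder names:
 *
 *	static const struct dma_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *	};
 *
 *	static int my_heap_create(void)
 *	{
 *		struct dma_heap_export_info exp_info = {
 *			.name = "my_heap",
 *			.ops = &my_heap_ops,
 *			.priv = &my_heap_data,
 *		};
 *
 *		return PTR_ERR_OR_ZERO(dma_heap_add(&exp_info));
 *	}
 */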
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
	struct dma_heap *heap, *h, *err_ret;
	struct device *dev_ret;
	unsigned int minor;
	int ret;

	if (!exp_info->name || !strcmp(exp_info->name, "")) {
		pr_err("dma_heap: Cannot add heap without a name\n");
		return ERR_PTR(-EINVAL);
	}

	if (!exp_info->ops || !exp_info->ops->allocate) {
		pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
		return ERR_PTR(-EINVAL);
	}

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->name = exp_info->name;
	heap->ops = exp_info->ops;
	heap->priv = exp_info->priv;

	/* Find unused minor number */
	ret = xa_alloc(&dma_heap_minors, &minor, heap,
		       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
	if (ret < 0) {
		pr_err("dma_heap: Unable to get minor number for heap\n");
		err_ret = ERR_PTR(ret);
		goto err0;
	}

	/* Create device */
	heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);

	cdev_init(&heap->heap_cdev, &dma_heap_fops);
	ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
	if (ret < 0) {
		pr_err("dma_heap: Unable to add char device\n");
		err_ret = ERR_PTR(ret);
		goto err1;
	}

	dev_ret = device_create(dma_heap_class,
				NULL,
				heap->heap_devt,
				NULL,
				heap->name);
	if (IS_ERR(dev_ret)) {
		pr_err("dma_heap: Unable to create device\n");
		err_ret = ERR_CAST(dev_ret);
		goto err2;
	}

	mutex_lock(&heap_list_lock);
	/* check the name is unique */
	list_for_each_entry(h, &heap_list, list) {
		if (!strcmp(h->name, exp_info->name)) {
			mutex_unlock(&heap_list_lock);
			pr_err("dma_heap: Already registered heap named %s\n",
			       exp_info->name);
			err_ret = ERR_PTR(-EINVAL);
			goto err3;
		}
	}

	/* Add heap to the list */
	list_add(&heap->list, &heap_list);
	mutex_unlock(&heap_list_lock);

	return heap;

err3:
	device_destroy(dma_heap_class, heap->heap_devt);
err2:
	cdev_del(&heap->heap_cdev);
err1:
	xa_erase(&dma_heap_minors, minor);
err0:
	kfree(heap);
	return err_ret;
}

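/*
 * Place heap device nodes under a common directory, so a heap named
 * "system" shows up as /dev/dma_heap/system.
 */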
static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
}

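/*
 * Reserve the char-device region and create the "dma_heap" class at
 * subsys_initcall time, so heaps registered by later drivers can
 * create their device nodes.
 */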
static int dma_heap_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
	if (ret)
		return ret;

	dma_heap_class = class_create(DEVNAME);
	if (IS_ERR(dma_heap_class)) {
		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
		return PTR_ERR(dma_heap_class);
	}
	dma_heap_class->devnode = dma_heap_devnode;

	return 0;
}
subsys_initcall(dma_heap_init);