// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

#include <linux/dma-mapping.h>

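/*
 * Free callback for VRAM GEM objects.  If the host-side resource was
 * created, tear it down in order: unmap the host-visible window (only
 * when its drm_mm node was actually allocated), then drop the host
 * resource and kick the virtqueue.
 */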
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (bo->created) {
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
}

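/*
 * No fault handler is needed: virtio_gpu_vram_mmap() remaps the whole
 * BO up front, so only the default GEM open/close refcounting applies.
 */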
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

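/*
 * Map a host-visible VRAM BO into userspace.  The BO must have been
 * created with VIRTGPU_BLOB_FLAG_USE_MAPPABLE, and the host mapping
 * must have finished (map_state == STATE_OK) before the PFN range can
 * be inserted with the cache attributes the host reported.
 */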
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}

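/*
 * dma-buf map callback.  Non-mappable blobs get a stub (empty)
 * sg_table: importers on the same virtio bus can still reach the
 * buffer through its UUID.  Mappable blobs get a single-entry table
 * covering the BO's host-visible window, mapped with
 * dma_map_resource() since the window is not backed by struct pages.
 */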
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
					     struct device *dev,
					     enum dma_data_direction dir)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	struct sg_table *sgt;
	dma_addr_t addr;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
		// Virtio devices can access the dma-buf via its UUID. Return a stub
		// sg_table so the dma-buf API still works.
		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
			ret = -EIO;
			goto out;
		}
		return sgt;
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		goto out;

	addr = dma_map_resource(dev, vram->vram_node.start,
				vram->vram_node.size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(dev, addr);
	if (ret)
		goto out;

	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = vram->vram_node.size;

	return sgt;
out:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(ret);
}

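/*
 * dma-buf unmap callback.  Stub tables (nents == 0) have nothing
 * mapped; otherwise undo the dma_map_resource() from the map callback.
 */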
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	if (sgt->nents) {
		dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
				   sg_dma_len(sgt->sgl), dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(sgt);
	kfree(sgt);
}

static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};

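/* VRAM BOs are recognized by their object funcs; see the table above. */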
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}

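/*
 * Reserve a window in the host-visible region and ask the host to map
 * the BO into it.  The offset sent to the host is relative to the
 * start of the host-visible region; the drm_mm node is released again
 * on any failure.
 */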
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/*TODO: Add an error checking helper function in drm_mm.h */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}

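/*
 * Create a VRAM (blob) BO: initialize it as a private GEM object with
 * a fake mmap offset, reserve a resource id, then issue the
 * resource-create-blob command.  Mappable blobs are also mapped into
 * the host-visible region right away.
 */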
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}