/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"
#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_fpriv *vfpriv)
{
	char dbgname[TASK_COMM_LEN];

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      vfpriv->context_init, strlen(dbgname),
				      dbgname);

	vfpriv->context_created = true;
}

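/*
 * Lazily create the host-side rendering context on first use. Safe to
 * call repeatedly: once the context exists, subsequent calls return
 * without reissuing the context-create command.
 */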
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

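/*
 * VIRTGPU_MAP: translate a GEM handle into the fake mmap offset that
 * userspace then passes to mmap() on the DRM fd to map the buffer.
 */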
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

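/*
 * VIRTGPU_GETPARAM: report device capabilities to userspace, one
 * parameter per call; the result is written through the user pointer in
 * @value. A minimal userspace sketch (assumes libdrm's drmIoctl() and
 * the virtgpu uapi header; error handling omitted):
 *
 *	int value = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (__u64)(uintptr_t)&value,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0 && value)
 *		;	// host supports virgl 3D
 */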
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CONTEXT_INIT:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
		value = vgdev->capset_id_mask;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

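/*
 * VIRTGPU_RESOURCE_CREATE: allocate a host-backed resource plus the
 * guest-side GEM object that shadows it. Without virgl 3D support only
 * simple 2D resources are accepted.
 */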
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;

	/*
	 * The handle owns the reference now.  But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}

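/*
 * VIRTGPU_RESOURCE_INFO: report the size, host resource handle and (for
 * blob resources) the blob memory type of a GEM object.
 */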
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

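/*
 * VIRTGPU_TRANSFER_FROM_HOST: copy (part of) a resource from the host
 * into guest memory; only meaningful with virgl 3D. The copy is fenced
 * so callers can wait for completion via VIRTGPU_WAIT.
 */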
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

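/*
 * VIRTGPU_TRANSFER_TO_HOST: copy (part of) a resource from guest memory
 * to the host. On 2D-only devices the x/y/w/h box is sent unfenced; on
 * 3D devices the transfer is fenced and may carry explicit strides.
 */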
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
					       0);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

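/*
 * VIRTGPU_WAIT: block (up to 15 seconds) until all fences attached to a
 * buffer have signaled, or merely poll them with VIRTGPU_WAIT_NOWAIT.
 * A minimal userspace sketch (assumes libdrm's drmIoctl(); error
 * handling omitted):
 *
 *	struct drm_virtgpu_3d_wait wait = { .handle = bo_handle };
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);
 */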
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	} else {
		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
					    true, timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

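/*
 * VIRTGPU_GET_CAPS: copy a capability set to userspace. Capsets are
 * cached per (id, version); on a cache miss the host is queried and the
 * caller waits (up to 5 seconds) for the response.
 */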
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
					&cache_ent);
	if (ret)
		return ret;
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

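/*
 * Validate a blob creation request against the device's capabilities
 * and fill in @params. Returns 0 on success, -EINVAL on any violation.
 */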
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

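/*
 * VIRTGPU_RESOURCE_CREATE_BLOB: create a blob resource, backed either by
 * guest pages or by host memory (VRAM object). A minimal userspace
 * sketch for a mappable guest blob, assuming the device advertises
 * VIRTGPU_PARAM_RESOURCE_BLOB (uses libdrm's drmIoctl(); error handling
 * omitted):
 *
 *	struct drm_virtgpu_resource_create_blob rcb = {
 *		.blob_mem = VIRTGPU_BLOB_MEM_GUEST,
 *		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
 *		.size = 4096,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &rcb);
 *	// rcb.bo_handle and rcb.res_handle are now valid
 */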
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	/*
	 * The handle owns the reference now.  But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}

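/*
 * VIRTGPU_CONTEXT_INIT: create the host context with explicit parameters
 * (capset ID, ring count, poll mask) instead of the lazy defaults. Fails
 * with -EEXIST once a context has already been created for this file.
 */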
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
					 void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t num_params, i, param, value;
	uint64_t valid_ring_mask;
	size_t len;
	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_context_init *args = data;

	num_params = args->num_params;
	len = num_params * sizeof(struct drm_virtgpu_context_set_param);

	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
		return -EINVAL;

	/* Number of unique parameters supported at this time. */
	if (num_params > 3)
		return -EINVAL;

	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
				     len);

	if (IS_ERR(ctx_set_params))
		return PTR_ERR(ctx_set_params);

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created) {
		ret = -EEXIST;
		goto out_unlock;
	}

	for (i = 0; i < num_params; i++) {
		param = ctx_set_params[i].param;
		value = ctx_set_params[i].value;

		switch (param) {
		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
			if (value > MAX_CAPSET_ID) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* Context capset ID already set */
			if (vfpriv->context_init &
			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->context_init |= value;
			break;
		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
			if (vfpriv->base_fence_ctx) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (value > MAX_RINGS) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
			vfpriv->num_rings = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
			if (vfpriv->ring_idx_mask) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->ring_idx_mask = value;
			break;
		default:
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (vfpriv->ring_idx_mask) {
		valid_ring_mask = 0;
		for (i = 0; i < vfpriv->num_rings; i++)
			valid_ring_mask |= 1ULL << i;

		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	virtio_gpu_create_context_locked(vgdev, vfpriv);
	virtio_gpu_notify(vgdev);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
	kfree(ctx_set_params);
	return ret;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
			  DRM_RENDER_ALLOW),
};