/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

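/*
 * Command buffers are allocated from the "virtio-gpu-vbufs" slab: the
 * command itself and a small response are stored inline, directly behind
 * struct virtio_gpu_vbuffer (see virtio_gpu_get_vbuf()).  Responses larger
 * than MAX_INLINE_RESP_SIZE use a buffer supplied by the caller.
 */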
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

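/*
 * Virtqueue interrupt callbacks: defer reclaiming of completed buffers to
 * the per-queue dequeue work items so it runs in process context.
 */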
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

        BUG_ON(size > MAX_INLINE_CMD_SIZE ||
               size < sizeof(struct virtio_gpu_ctrl_hdr));
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
        /* this assumes a vbuf contains a command that starts with a
         * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
         * virtqueues.
         */
        return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer **vbuffer_p,
                                     int size,
                                     virtio_gpu_resp_cb cb)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

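/*
 * Pull all completed buffers off the virtqueue and collect them on
 * reclaim_list.  Called with the queue lock held.
 */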
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

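/*
 * Work handler for the control queue: reclaims completed vbuffers, logs
 * error responses, signals fences and runs per-command response callbacks
 * before freeing the buffers.
 */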
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
                                DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
                                                      le32_to_cpu(resp->type),
                                                      le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        fence_id = le64_to_cpu(resp->fence_id);
                        virtio_gpu_fence_event_process(vgdev, fence_id);
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}

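/*
 * Work handler for the cursor queue.  Cursor commands carry no response
 * callbacks, so completed buffers are simply traced and freed.
 */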
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                struct virtio_gpu_ctrl_hdr *resp =
                        (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}

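/*
 * Add the prepared scatterlists for one command to the control virtqueue.
 * Sleeps until enough descriptors are free and emits the fence (if any)
 * once the command's place in the queue is known.
 */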
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf,
                                     struct virtio_gpu_fence *fence,
                                     int elemcnt,
                                     struct scatterlist **sgs,
                                     int outcnt,
                                     int incnt)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int ret, idx;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                if (fence && vbuf->objs)
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                free_vbuf(vgdev, vbuf);
                return -ENODEV;
        }

        if (vgdev->has_indirect)
                elemcnt = 1;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        if (vq->num_free < elemcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                virtio_gpu_notify(vgdev);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
                goto again;
        }

        /* now that the position of the vbuf in the virtqueue is known, we can
         * finally set the fence id
         */
        if (fence) {
                virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
                                      fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }

        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        WARN_ON(ret);

        vbuf->seqno = ++vgdev->ctrlq.seqno;
        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

        atomic_inc(&vgdev->pending_commands);

        spin_unlock(&vgdev->ctrlq.qlock);

        drm_dev_exit(idx);
        return 0;
}

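/*
 * Build the out/in scatterlists for a command buffer (the command itself,
 * an optional data payload and an optional response buffer) and queue it
 * on the control queue.
 */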
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_fence *fence)
{
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        struct sg_table *sgt = NULL;
        int elemcnt = 0, outcnt = 0, incnt = 0, ret;

        /* set up vcmd */
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        elemcnt++;
        sgs[outcnt] = &vcmd;
        outcnt++;

        /* set up vout */
        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        int sg_ents;
                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &sg_ents);
                        if (!sgt) {
                                if (fence && vbuf->objs)
                                        virtio_gpu_array_unlock_resv(vbuf->objs);
                                return -ENOMEM;
                        }

                        elemcnt += sg_ents;
                        sgs[outcnt] = sgt->sgl;
                } else {
                        sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                        elemcnt++;
                        sgs[outcnt] = &vout;
                }
                outcnt++;
        }

        /* set up vresp */
        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                elemcnt++;
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

        ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
                                        incnt);

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
        return ret;
}

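/*
 * Kick the control queue if commands are pending.  Notification is
 * decoupled from queuing so that several commands can be batched into a
 * single kick.
 */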
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
        bool notify;

        if (!atomic_read(&vgdev->pending_commands))
                return;

        spin_lock(&vgdev->ctrlq.qlock);
        atomic_set(&vgdev->pending_commands, 0);
        notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
        spin_unlock(&vgdev->ctrlq.qlock);

        if (notify)
                virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

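/*
 * Cursor commands bypass the fence and batching machinery; they are added
 * to the cursor queue and kicked immediately.
 */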
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int idx, ret, outcnt;
        bool notify;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                free_vbuf(vgdev, vbuf);
                return;
        }

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                vbuf->seqno = ++vgdev->cursorq.seqno;
                trace_virtio_gpu_cmd_queue(vq,
                                           virtio_gpu_vbuf_ctrl_hdr(vbuf),
                                           vbuf->seqno);

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);

        drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
        bo->created = true;
}

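/*
 * Response callback for RESOURCE_UNREF: the object is only cleaned up once
 * the host has confirmed that it no longer uses the resource.
 */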
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo;

        bo = vbuf->resp_cb_data;
        vbuf->resp_cb_data = NULL;

        virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int ret;

        cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
                                        virtio_gpu_cmd_unref_cb);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->resp_cb_data = bo;
        ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        if (ret < 0)
                virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height,
                                   struct virtio_gpu_object_array *objs,
                                   struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

        if (virtio_gpu_is_shmem(bo) && use_dma_api)
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            bo->base.sgt, DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        if (vgdev->capsets) {
                vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
                vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
                vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        } else {
                DRM_ERROR("invalid capset memory.");
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}

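/*
 * drm_do_get_edid() block-read helper, backed by the response buffer of a
 * GET_EDID command.
 */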
static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -EINVAL;
        memcpy(buf, resp->edid + start, len);
        return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t context_init, uint32_t nlen,
                                   const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        cmd_p->context_init = cpu_to_le32(context_init);
        strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        uint32_t stride,
                                        uint32_t layer_stride,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

        if (virtio_gpu_is_shmem(bo) && use_dma_api)
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            bo->base.sgt, DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
        cmd_p->stride = cpu_to_le32(stride);
        cmd_p->layer_stride = cpu_to_le32(layer_stride);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          uint32_t stride,
                                          uint32_t layer_stride,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
        cmd_p->stride = cpu_to_le32(stride);
        cmd_p->layer_stride = cpu_to_le32(layer_stride);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj,
                              struct virtio_gpu_mem_entry *ents,
                              unsigned int nents)
{
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
                                            struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *obj =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_resource_uuid *resp =
                (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->resource_export_lock);
        WARN_ON(obj->uuid_state != STATE_INITIALIZING);

        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
            obj->uuid_state == STATE_INITIALIZING) {
                import_uuid(&obj->uuid, resp->uuid);
                obj->uuid_state = STATE_OK;
        } else {
                obj->uuid_state = STATE_ERR;
        }
        spin_unlock(&vgdev->resource_export_lock);

        wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_resource_assign_uuid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_resource_uuid *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
                bo->uuid_state = STATE_ERR;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->objs = objs;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
                                           struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_map_info *resp =
                (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->host_visible_lock);

        if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
                vram->map_info = resp->map_info;
                vram->map_state = STATE_OK;
        } else {
                vram->map_state = STATE_ERR;
        }

        spin_unlock(&vgdev->host_visible_lock);
        wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
                       struct virtio_gpu_object_array *objs, uint64_t offset)
{
        struct virtio_gpu_resource_map_blob *cmd_p;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_map_info *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        vbuf->objs = objs;

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
                          struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unmap_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_mem_entry *ents,
                                    uint32_t nents)
{
        struct virtio_gpu_resource_create_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
        cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
        cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
        cmd_p->blob_id = cpu_to_le64(params->blob_id);
        cmd_p->size = cpu_to_le64(params->size);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
                                     uint32_t scanout_id,
                                     struct virtio_gpu_object *bo,
                                     struct drm_framebuffer *fb,
                                     uint32_t width, uint32_t height,
                                     uint32_t x, uint32_t y)
{
        uint32_t i;
        struct virtio_gpu_set_scanout_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        uint32_t format = virtio_gpu_translate_format(fb->format->format);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);

        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(fb->width);
        cmd_p->height = cpu_to_le32(fb->height);

        for (i = 0; i < 4; i++) {
                cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
                cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
        }

        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}