/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H

#include <linux/dma-direction.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_gpu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/virtgpu_drm.h>

#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
#define DRIVER_DATE "0"

#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0

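/*
 * Tri-state progress values used for uuid export and host-visible map
 * bookkeeping (see uuid_state and map_state below).
 */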
#define STATE_INITIALIZING 0
#define STATE_OK 1
#define STATE_ERR 2

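/*
 * Both limits are tied to the uint64_t bitmasks they feed:
 * capset_id_mask in virtio_gpu_device and ring_idx_mask in
 * virtio_gpu_fpriv.
 */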
#define MAX_CAPSET_ID 63
#define MAX_RINGS 64

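/*
 * Creation parameters collected from the dumb-buffer and resource-create
 * ioctls and handed to the resource-creation commands in virtgpu_vq.c.
 * The "classic" and "blob" field groups are mutually exclusive for a
 * given object.
 */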
struct virtio_gpu_object_params {
        unsigned long size;
        bool dumb;
        /* 3d */
        bool virgl;
        bool blob;

        /* classic resources only */
        uint32_t format;
        uint32_t width;
        uint32_t height;
        uint32_t target;
        uint32_t bind;
        uint32_t depth;
        uint32_t array_size;
        uint32_t last_level;
        uint32_t nr_samples;
        uint32_t flags;

        /* blob resources only */
        uint32_t ctx_id;
        uint32_t blob_mem;
        uint32_t blob_flags;
        uint64_t blob_id;
};

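/*
 * Driver-private GEM object.  hw_res_handle is the id the host knows the
 * backing resource by; the blob/host3d flags record how that resource was
 * created.
 */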
struct virtio_gpu_object {
        struct drm_gem_shmem_object base;
        uint32_t hw_res_handle;
        bool dumb;
        bool created;
        bool host3d_blob, guest_blob;
        uint32_t blob_mem, blob_flags;

        int uuid_state;
        uuid_t uuid;
};
#define gem_to_virtio_gpu_obj(gobj) \
        container_of((gobj), struct virtio_gpu_object, base.base)

struct virtio_gpu_object_shmem {
        struct virtio_gpu_object base;
};

struct virtio_gpu_object_vram {
        struct virtio_gpu_object base;
        uint32_t map_state;
        uint32_t map_info;
        struct drm_mm_node vram_node;
};

#define to_virtio_gpu_shmem(virtio_gpu_object) \
        container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)

#define to_virtio_gpu_vram(virtio_gpu_object) \
        container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base)

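/*
 * A set of GEM objects referenced by a command: total is the allocated
 * capacity, nents the number of slots filled so far.
 */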
struct virtio_gpu_object_array {
        struct ww_acquire_ctx ticket;
        struct list_head next;
        u32 nents, total;
        struct drm_gem_object *objs[] __counted_by(total);
};

struct virtio_gpu_vbuffer;
struct virtio_gpu_device;

typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_vbuffer *vbuf);

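/*
 * Per-device fence state: fence ids are handed out from current_fence_id,
 * and last_fence_id tracks the newest id the host has signalled.
 */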
struct virtio_gpu_fence_driver {
        atomic64_t last_fence_id;
        uint64_t current_fence_id;
        uint64_t context;
        struct list_head fences;
        spinlock_t lock;
};

struct virtio_gpu_fence_event {
        struct drm_pending_event base;
        struct drm_event event;
};

struct virtio_gpu_fence {
        struct dma_fence f;
        uint32_t ring_idx;
        uint64_t fence_id;
        bool emit_fence_info;
        struct virtio_gpu_fence_event *e;
        struct virtio_gpu_fence_driver *drv;
        struct list_head node;
};

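/*
 * One queued control/cursor command: the request in buf, optional extra
 * data in data_buf, and an optional response buffer whose callback the
 * dequeue worker runs once the host has processed the command.
 */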
struct virtio_gpu_vbuffer {
        char *buf;
        int size;

        void *data_buf;
        uint32_t data_size;

        char *resp_buf;
        int resp_size;
        virtio_gpu_resp_cb resp_cb;
        void *resp_cb_data;

        struct virtio_gpu_object_array *objs;
        struct list_head list;

        uint32_t seqno;
};

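/* One scanout: DRM crtc/encoder/connector plus cached host display info. */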
struct virtio_gpu_output {
        int index;
        struct drm_crtc crtc;
        struct drm_connector conn;
        struct drm_encoder enc;
        struct virtio_gpu_display_one info;
        struct virtio_gpu_update_cursor cursor;
        struct edid *edid;
        int cur_x;
        int cur_y;
        bool needs_modeset;
};
#define drm_crtc_to_virtio_gpu_output(x) \
        container_of(x, struct virtio_gpu_output, crtc)

struct virtio_gpu_framebuffer {
        struct drm_framebuffer base;
        struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
        container_of(x, struct virtio_gpu_framebuffer, base)

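/*
 * Wrapper around one virtqueue (control or cursor) with the lock, wait
 * queue and work item used to feed and drain it.
 */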
struct virtio_gpu_queue {
        struct virtqueue *vq;
        spinlock_t qlock;
        wait_queue_head_t ack_queue;
        struct work_struct dequeue_work;
        uint32_t seqno;
};

struct virtio_gpu_drv_capset {
        uint32_t id;
        uint32_t max_version;
        uint32_t max_size;
};

struct virtio_gpu_drv_cap_cache {
        struct list_head head;
        void *caps_cache;
        uint32_t id;
        uint32_t version;
        uint32_t size;
        atomic_t is_valid;
};

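/* Per-device driver state, shared by every open file of one virtio-gpu. */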
struct virtio_gpu_device {
        struct drm_device *ddev;

        struct virtio_device *vdev;

        struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
        uint32_t num_scanouts;

        struct virtio_gpu_queue ctrlq;
        struct virtio_gpu_queue cursorq;
        struct kmem_cache *vbufs;

        atomic_t pending_commands;

        struct ida resource_ida;

        wait_queue_head_t resp_wq;
        /* current display info */
        spinlock_t display_info_lock;
        bool display_info_pending;

        struct virtio_gpu_fence_driver fence_drv;

        struct ida ctx_id_ida;

        bool has_virgl_3d;
        bool has_edid;
        bool has_indirect;
        bool has_resource_assign_uuid;
        bool has_resource_blob;
        bool has_host_visible;
        bool has_context_init;
        struct virtio_shm_region host_visible_region;
        struct drm_mm host_visible_mm;

        struct work_struct config_changed_work;

        struct work_struct obj_free_work;
        spinlock_t obj_free_lock;
        struct list_head obj_free_list;

        struct virtio_gpu_drv_capset *capsets;
        uint32_t num_capsets;
        uint64_t capset_id_mask;
        struct list_head cap_cache;

        /* protects uuid state when exporting */
        spinlock_t resource_export_lock;
        /* protects map state and host_visible_mm */
        spinlock_t host_visible_lock;
};

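/* Per-open-file state: the host rendering context backing this DRM file. */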
struct virtio_gpu_fpriv {
        uint32_t ctx_id;
        uint32_t context_init;
        bool context_created;
        uint32_t num_rings;
        uint64_t base_fence_ctx;
        uint64_t ring_idx_mask;
        struct mutex context_lock;
};

/* virtgpu_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 12
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);

/* virtgpu_kms.c */
int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);

/* virtgpu_gem.c */
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
        struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
        struct drm_file *file);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
        struct drm_device *dev,
        struct drm_mode_create_dumb *args);
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
        struct drm_device *dev,
        uint32_t handle, uint64_t *offset_p);

struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array *
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
        struct drm_gem_object *obj);
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
        struct dma_fence *fence);
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);
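
/*
 * Rough lifecycle of an object array around a fenced command (a sketch
 * pieced together from the declarations above, not a verbatim call site;
 * the real users live in virtgpu_ioctl.c, virtgpu_plane.c and
 * virtgpu_vq.c):
 *
 *   objs = virtio_gpu_array_from_handles(file, handles, nents);
 *   virtio_gpu_array_lock_resv(objs);
 *   ... queue a command that references objs and a fence ...
 *   virtio_gpu_array_add_fence(objs, &fence->f);
 *   virtio_gpu_array_unlock_resv(objs);
 *
 * Ownership of the array typically passes to the queued command, which
 * releases it from the dequeue worker via
 * virtio_gpu_array_put_free_delayed()/virtio_gpu_array_put_free_work().
 */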

/* virtgpu_vq.c */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo,
        struct virtio_gpu_object_params *params,
        struct virtio_gpu_object_array *objs,
        struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
        uint64_t offset,
        uint32_t width, uint32_t height,
        uint32_t x, uint32_t y,
        struct virtio_gpu_object_array *objs,
        struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
        uint32_t resource_id,
        uint32_t x, uint32_t y,
        uint32_t width, uint32_t height,
        struct virtio_gpu_object_array *objs,
        struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
        uint32_t scanout_id, uint32_t resource_id,
        uint32_t width, uint32_t height,
        uint32_t x, uint32_t y);
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *obj,
        struct virtio_gpu_mem_entry *ents,
        unsigned int nents);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
        int idx, int version,
        struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
        uint32_t context_init, uint32_t nlen,
        const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
        uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
        uint32_t ctx_id,
        struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
        uint32_t ctx_id,
        struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
        void *data, uint32_t data_size,
        uint32_t ctx_id,
        struct virtio_gpu_object_array *objs,
        struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
        uint32_t ctx_id,
        uint64_t offset, uint32_t level,
        uint32_t stride,
        uint32_t layer_stride,
        struct drm_virtgpu_3d_box *box,
        struct virtio_gpu_object_array *objs,
        struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        uint32_t ctx_id,
        uint64_t offset, uint32_t level,
        uint32_t stride,
        uint32_t layer_stride,
        struct drm_virtgpu_3d_box *box,
        struct virtio_gpu_object_array *objs,
        struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo,
        struct virtio_gpu_object_params *params,
        struct virtio_gpu_object_array *objs,
        struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_array *objs);

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_array *objs, uint64_t offset);

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo);

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo,
        struct virtio_gpu_object_params *params,
        struct virtio_gpu_mem_entry *ents,
        uint32_t nents);
void
virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
        uint32_t scanout_id,
        struct virtio_gpu_object *bo,
        struct drm_framebuffer *fb,
        uint32_t width, uint32_t height,
        uint32_t x, uint32_t y);

/* virtgpu_display.c */
int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);

/* virtgpu_plane.c */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
        enum drm_plane_type type,
        int index);

/* virtgpu_fence.c */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
        uint64_t base_fence_ctx,
        uint32_t ring_idx);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_ctrl_hdr *cmd_hdr,
        struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
        u64 fence_id);
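
/*
 * Sketch of how a fenced command is typically driven, inferred from the
 * declarations in this header (the authoritative call sites are in
 * virtgpu_plane.c and virtgpu_vq.c):
 *
 *   fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 *   virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y, objs, fence);
 *   virtio_gpu_notify(vgdev);
 *   dma_fence_wait(&fence->f, true);
 *
 * virtio_gpu_fence_emit() is used while the command is queued, and
 * virtio_gpu_fence_event_process() signals completed fences from the
 * control-queue dequeue path.
 */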

/* virtgpu_object.c */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
        size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_params *params,
        struct virtio_gpu_object **bo_ptr,
        struct virtio_gpu_fence *fence);

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);

int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
        uint32_t *resid);

/* virtgpu_prime.c */
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo);
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
        int flags);
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
        struct dma_buf *buf);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
        struct drm_device *dev, struct dma_buf_attachment *attach,
        struct sg_table *sgt);

/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);

/* virtgpu_vram.c */
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_params *params,
        struct virtio_gpu_object **bo_ptr);
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
        struct device *dev,
        enum dma_data_direction dir);
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
        struct sg_table *sgt,
        enum dma_data_direction dir);

/* virtgpu_submit.c */
int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file);

#endif /* VIRTIO_DRV_H */