/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/mdev.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

#include "gem/i915_gem_dmabuf.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"

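/*
 * A gen8+ GGTT PTE carries the 4KiB-aligned page address in bits 63:12;
 * mask off the low flag bits to recover the DMA address.
 */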
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

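/*
 * Build the sg_table backing the proxy GEM object: read the framebuffer's
 * shadow PTEs straight from the GGTT and pin each guest page so its DMA
 * address stays valid for the lifetime of the pages.
 */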
static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	unsigned int page_num; /* limited by sg_alloc_table */

	if (overflows_type(obj->base.size >> PAGE_SHIFT, page_num))
		return -E2BIG;

	page_num = obj->base.size >> PAGE_SHIFT;
	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
		(fb_info->start >> PAGE_SHIFT);
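	/*
	 * Decode one PTE per page; a page that cannot be pinned unwinds
	 * everything pinned so far via the error path below.
	 */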
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

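/*
 * Release the backing store: the guest pages are unpinned only while the
 * dma-buf export still exists (vgpu_gem_release() clears base.dma_buf
 * before the final put), then the sg_table itself is freed.
 */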
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			intel_gvt_dma_unmap_guest_page(vgpu,
						       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

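/*
 * kref release callback: if the vGPU is still active, unlink the dmabuf_obj
 * from its list and release its IDR slot; otherwise the object is an orphan
 * left behind by intel_vgpu_dmabuf_cleanup() and is simply freed.
 */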
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
	    !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

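/*
 * Called when the proxy GEM object is destroyed: detach the dma-buf and drop
 * the dmabuf_obj reference taken at export time in intel_vgpu_get_dmabuf().
 */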
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

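/*
 * Wrap a decoded guest framebuffer in a proxy GEM object.  The object has no
 * shmem backing (drm_gem_private_object_init()); its pages come from the
 * guest via vgpu_gem_get_pages().  Tiling and stride are mirrored from the
 * guest plane state so importers see the same layout the guest programmed.
 */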
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (GRAPHICS_VER(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

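/*
 * Decode the vGPU's current primary or cursor plane into @info and sanity
 * check the result: non-zero size, page-aligned base and a range that lies
 * inside the vGPU's GGTT allocation.
 */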
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

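/*
 * Look up an already-exposed dmabuf_obj whose decoded framebuffer matches
 * @latest_info, so the same plane is not exported twice.  Caller must hold
 * vgpu->dmabuf_lock.
 */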
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (!dmabuf_obj->info)
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

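/*
 * Look up a dmabuf_obj by the id handed out from vgpu->object_idr.  Caller
 * must hold vgpu->dmabuf_lock.
 */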
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

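/* Copy the decoded framebuffer parameters into the VFIO reply for userspace. */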
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
			   struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

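/*
 * Backend for the VFIO_DEVICE_QUERY_GFX_PLANE ioctl: decode the requested
 * plane, reuse a matching dmabuf_obj if one is already exposed, otherwise
 * allocate a new one.  The initial reference taken here is handed over to
 * the first intel_vgpu_get_dmabuf() call.
 */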
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				      VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
		 (!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

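	/*
	 * The GEM object now holds a dmabuf_obj reference (dropped again in
	 * vgpu_gem_release()), so the initial reference taken at
	 * query_plane time can be released here.
	 */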
	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    " file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

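/*
 * Tear down all dmabuf_objs when the vGPU goes away.  Objects still pinned
 * by an exported dma-buf lose their vgpu back-pointer and become orphans,
 * freed by dmabuf_gem_object_free() at the final kref_put().
 */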
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}