/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

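/* Look up the client's ABI16 state, allocating it on first use.  Returns
 * NULL if either the allocation or construction of the device object fails.
 */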
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			struct nv_device_v0 args = {
				.device = ~0ULL,
			};

			INIT_LIST_HEAD(&abi16->channels);

			/* allocate device object targeting client's default
			 * device (ie. the one that belongs to the fd it
			 * opened)
			 */
			if (nvif_device_ctor(&cli->base.object, "abi16Device",
					     0, NV_DEVICE, &args, sizeof(args),
					     &abi16->device) == 0)
				return cli->abi16;

			kfree(cli->abi16);
			cli->abi16 = NULL;
		}
	}
	return cli->abi16;
}

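/* Take the client mutex and return the ABI16 state, or NULL on failure (in
 * which case the mutex is dropped again).  Pair with nouveau_abi16_put().
 */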
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (nouveau_abi16(file_priv))
		return cli->abi16;
	mutex_unlock(&cli->mutex);
	return NULL;
}

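/* Release the client mutex taken by nouveau_abi16_get() and pass the given
 * return code straight through, so handlers can "return nouveau_abi16_put()".
 */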
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	mutex_unlock(&cli->mutex);
	return ret;
}

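/* Map the device's chipset family to the matching software object class, or
 * 0x0000 if the family isn't recognised.
 */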
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return NVIF_CLASS_SW_NV04;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return NVIF_CLASS_SW_NV10;
	case NV_DEVICE_INFO_V0_TESLA:
		return NVIF_CLASS_SW_NV50;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
	case NV_DEVICE_INFO_V0_VOLTA:
		return NVIF_CLASS_SW_GF100;
	}

	return 0x0000;
}

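/* Destroy a notifier object and return its allocation to the per-channel
 * notifier heap.
 */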
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvif_object_dtor(&ntfy->object);
	nvkm_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

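/* Tear down a single ABI16 channel: scheduler entity, notifier objects,
 * notifier buffer, and finally the channel object itself.
 */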
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* When a client exits without waiting for its queued up jobs to
	 * finish it might happen that we fault the channel. This is due to
	 * drm_file_free() calling drm_gem_release() before the postclose()
	 * callback. Hence, we can't tear down this scheduler entity before
	 * uvmm mappings are unmapped. Currently, we can't detect this case.
	 *
	 * However, this should be rare and harmless, since the channel isn't
	 * needed anymore.
	 */
	nouveau_sched_entity_fini(&chan->sched_entity);

	/* wait for all activity to stop before cleaning up */
	if (chan->chan)
		nouveau_channel_idle(chan->chan);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_vma_del(&chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_put(&chan->ntfy->bo.base);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		nvif_object_dtor(&chan->ce);
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

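/* Destroy all ABI16 state for a client: every channel, followed by the
 * device object allocated in nouveau_abi16().
 */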
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	struct nouveau_abi16_chan *chan, *temp;

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	/* destroy the device object */
	nvif_device_dtor(&abi16->device);

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

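/* Report the IB limit for EXEC_PUSH_MAX: 0 when the device still exposes
 * DMA-mode channel classes, NV50_DMA_IB_MAX for IB-mode pushbufs.
 */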
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
	const struct nvif_mclass dmas[] = {
		{ NV03_CHANNEL_DMA, 0 },
		{ NV10_CHANNEL_DMA, 0 },
		{ NV17_CHANNEL_DMA, 0 },
		{ NV40_CHANNEL_DMA, 0 },
		{}
	};

	return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
}

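/* Handler for DRM_IOCTL_NOUVEAU_GETPARAM: report device and driver
 * properties to userspace.  Illustrative (not definitive) userspace usage
 * via libdrm:
 *
 *	struct drm_nouveau_getparam gp = {
 *		.param = NOUVEAU_GETPARAM_CHIPSET_ID,
 *	};
 *	drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp));
 */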
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nvkm_gr *gr = nvxx_gr(device);
	struct drm_nouveau_getparam *getparam = data;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		switch (device->info.platform) {
		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
		default:
			WARN_ON(1);
			break;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = nvif_device_time(device);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = nvkm_gr_units(gr);
		break;
	case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
		int ib_max = getparam_dma_ib_max(device);

		getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
		break;
	}
	default:
		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

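/* Handler for DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC: create a GPU channel, its
 * pushbuf and notifier buffer for the client.  Using this legacy path
 * disables the client's uvmm (VM_BIND) interface so the two uAPIs can't be
 * mixed.
 */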
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device;
	u64 engine, runm;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	/* If uvmm wasn't initialized until now disable it completely to prevent
	 * userspace from mixing up UAPIs.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	__nouveau_cli_disable_uvmm_noinit(cli);

	device = &abi16->device;
	engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle == ~0) {
			switch (init->tt_ctxdma_handle) {
			case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR    ; break;
			case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
			case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
			case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
			case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE    ; break;
			default:
				return nouveau_abi16_put(abi16, -ENOSYS);
			}

			init->fb_ctxdma_handle = 0;
			init->tt_ctxdma_handle = 0;
		}
	}

	if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
		runm = nvif_fifo_runlist(device, engine);
	else
		runm = nvif_fifo_runlist_ce(device);

	if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
					drm->sched_wq);
	if (ret)
		goto done;

	init->channel = chan->chan->chid;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Workaround "nvc0" gallium driver using classes it doesn't allocate on
	 * Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
	 * channel init, now we know what that stuff actually is.
	 *
	 * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
	 *
	 * Userspace was fixed prior to adding Ampere support.
	 */
	switch (device->info.family) {
	case NV_DEVICE_INFO_V0_VOLTA:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	case NV_DEVICE_INFO_V0_TURING:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	default:
		break;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
				     false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
				      &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

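/* Find the ABI16 wrapper for the given channel id, or NULL if the client
 * doesn't own such a channel.
 */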
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->chid == channel)
			return chan;
	}

	return NULL;
}

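/* Filter nvif ioctls routed at ABI16-owned objects: only NEW, MTHD and
 * SCLASS are permitted, and the token is translated to the handle of the
 * corresponding channel (or of the device object for ~0ULL).
 */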
int
nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
{
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16 *abi16;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		switch (args->v0.type) {
		case NVIF_IOCTL_V0_NEW:
		case NVIF_IOCTL_V0_MTHD:
		case NVIF_IOCTL_V0_SCLASS:
			break;
		default:
			return -EACCES;
		}
	} else
		return ret;

	if (!(abi16 = nouveau_abi16(file_priv)))
		return -ENOMEM;

	if (args->v0.token != ~0ULL) {
		if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
			return -EINVAL;
		args->v0.object = nvif_handle(&chan->chan->user);
		args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
		return 0;
	}

	args->v0.object = nvif_handle(&abi16->device.object);
	args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
	return 0;
}

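/* Handler for DRM_IOCTL_NOUVEAU_CHANNEL_FREE: destroy a channel previously
 * created via DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC.
 */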
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

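/* Handler for DRM_IOCTL_NOUVEAU_GROBJ_ALLOC: allocate an engine object on a
 * channel, applying compatibility fixups for class identifiers used by
 * older userspace.
 */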
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_client *client;
	struct nvif_sclass *sclass;
	s32 oclass = 0;
	int ret, i;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, init->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return nouveau_abi16_put(abi16, ret);

	if ((init->class & 0x00ff) == 0x006e) {
		/* nvsw: compatibility with older 0x*6e class identifier */
		for (i = 0; !oclass && i < ret; i++) {
			switch (sclass[i].oclass) {
			case NVIF_CLASS_SW_NV04:
			case NVIF_CLASS_SW_NV10:
			case NVIF_CLASS_SW_NV50:
			case NVIF_CLASS_SW_GF100:
				oclass = sclass[i].oclass;
				break;
			default:
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b1) {
		/* msvld: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
		/* mspdec: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
		/* msppp: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else {
		oclass = init->class;
	}

	nvif_object_sclass_put(&sclass);
	if (!oclass)
		return nouveau_abi16_put(abi16, -EINVAL);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
			       oclass, NULL, 0, &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;

	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

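/* Handler for DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC: carve a notifier out of
 * the channel's notifier buffer and wrap it in a DMA object.  Rejected on
 * Fermi and newer, where userspace no longer needs it.
 */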
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device = &abi16->device;
	struct nvif_client *client;
	struct nv_dma_v0 args = {};
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start += chan->ntfy_vma->addr;
		args.limit += chan->ntfy_vma->addr;
	} else
	if (drm->agp.bridge) {
		args.target = NV_DMA_V0_TARGET_AGP;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->offset;
		args.limit += drm->agp.base + chan->ntfy->offset;
	} else {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += chan->ntfy->offset;
		args.limit += chan->ntfy->offset;
	}

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
			       NV_DMA_IN_MEMORY, &args, sizeof(args),
			       &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;
done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

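/* Handler for DRM_IOCTL_NOUVEAU_GPUOBJ_FREE: idle the channel and destroy an
 * object previously created via GROBJ_ALLOC or NOTIFIEROBJ_ALLOC.
 */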
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->object.handle == fini->handle) {
			nouveau_abi16_ntfy_fini(chan, ntfy);
			ret = 0;
			break;
		}
	}

	return nouveau_abi16_put(abi16, ret);
}
