// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from qpd->queues_list,
	 *    under dqm_lock()/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without
	 *    dqm_lock. Save each node's SDMA count and also add it to the
	 *    total SDMA count. It is possible that a few SDMA queue nodes are
	 *    deleted from qpd->queues_list during this step.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got
	 *    deleted. A deleted node's SDMA count is already captured in the
	 *    past activity counter, so subtract the count saved for it in
	 *    step 2 from the total SDMA count.
	 */
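	/*
	 * Illustrative example of the scheme above: with SDMA queues A and B
	 * active at step 1, if B is destroyed while step 2 is reading
	 * counters, B's final count is folded into
	 * pdd->sdma_past_activity_counter by the destroy path, and step 3
	 * subtracts the step-2 reading saved for B so it is not counted twice.
	 */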
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in-flight on this device
 * by current process. Translates acquired wave count into number of compute units
 * that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The attribute
 * handle encapsulates GPU device it is associated with, thereby allowing collection
 * of waves in flight, etc
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from device if it supports */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
				       &max_waves_per_cu);

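	/*
	 * The division below rounds up. For example, wave_cnt = 100 with
	 * max_waves_per_cu = 40 yields cu_cnt = (100 + 39) / 40 = 3, i.e.
	 * three compute units are reported as occupied.
	 */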
	/* Translate wave count to number of compute units */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

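		/*
		 * Note: the work item lives on this stack frame. That is safe
		 * because flush_work() below does not return until
		 * kfd_sdma_activity_worker() has finished executing.
		 */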
		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static const struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};
ATTRIBUTE_GROUPS(procfs_queue);

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static const struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_groups = procfs_queue_groups,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static const struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static const struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}
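
/*
 * Note on kfd_sysfs_create_file(): sysfs keeps the attribute, including the
 * name pointer, for the lifetime of the file, so callers pass names with a
 * matching lifetime: string literals or per-pdd buffers such as
 * pdd->vram_filename.
 */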

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
		struct kfd_process_device *pdd, void **kptr)
{
	struct kfd_dev *dev = pdd->dev;

	if (kptr && *kptr) {
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
		*kptr = NULL;
	}

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 * This function should only be called right after the process
 * is created and while kfd_processes_mutex is still being held,
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, struct kgd_mem **mem, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
						      pdd->drm_priv, mem, NULL,
						      flags, false);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
						    pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
				(struct kgd_mem *)*mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto sync_memory_failed;
		}
	}

	return err;

sync_memory_failed:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*mem = NULL;
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from kernel. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &mem, &kaddr);
	if (ret)
		return ret;

	qpd->ib_mem = mem;
	qpd->ib_kaddr = kaddr;

	return 0;
}

static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!qpd->ib_kaddr || !qpd->ib_base)
		return;

	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread, false);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret)
			goto out_destroy;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;

out_destroy:
	hash_del_rcu(&process->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);
	/* kfd_process_free_notifier will trigger the cleanup */
	mmu_notifier_put(&process->mmu_notifier);
	return ERR_PTR(ret);
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread, false);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	if (p && ref)
		kref_get(&p->ref);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
	struct task_struct *task = NULL;
	struct kfd_process *p = NULL;

	if (!pid) {
		task = current;
		get_task_struct(task);
	} else {
		task = get_pid_task(pid, PIDTYPE_PID);
	}

	if (task) {
		p = find_process(task, true);
		put_task_struct(task);
	}

	return p;
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

/*
 * Just kunmap and unpin signal BO here. It will be freed in
 * kfd_process_free_outstanding_kfd_bos()
 */
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	struct kfd_dev *kdev;
	void *mem;

	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
	if (!kdev)
		return;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(kdev, p);
	if (!pdd)
		goto out;

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(p->signal_handle));
	if (!mem)
		goto out;

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);

out:
	mutex_unlock(&p->mutex);
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		kfd_process_device_destroy_cwsr_dgpu(pdd);
		kfd_process_device_destroy_ib_mem(pdd);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->adev, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		bitmap_free(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		if (pdd->dev->shared_resources.enable_mes)
			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
						   pdd->proc_ctx_bo);
		/*
		 * before destroying pdd, make sure to report availability
		 * for auto suspend
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

	kfd_process_kunmap_signal_bo(p);
	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mmu_notifier_put(&p->mmu_notifier);
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	/*
	 * Do early return if table is empty.
	 *
	 * This could potentially happen if this function is called concurrently
	 * by mmu_notifier and by kfd_cleanup_processes.
	 */
	if (hash_empty(kfd_processes_table)) {
		mutex_unlock(&kfd_processes_mutex);
		return;
	}
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	kfd_process_notifier_release_internal(p);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

/*
 * This code handles the case when driver is being unloaded before all
 * mm_struct are released. We need to safely free the kfd_process and
 * avoid race conditions with mmu_notifier that might try to free them.
 */
void kfd_cleanup_processes(void)
{
	struct kfd_process *p;
	struct hlist_node *p_temp;
	unsigned int temp;
	HLIST_HEAD(cleanup_list);

	/*
	 * Move all remaining kfd_process from the process table to a
	 * temp list for processing. Once done, callback from mmu_notifier
	 * release will not see the kfd_process in the table and do early return,
	 * avoiding double free issues.
	 */
	mutex_lock(&kfd_processes_mutex);
	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
		hash_del_rcu(&p->kfd_processes);
		synchronize_srcu(&kfd_processes_srcu);
		hlist_add_head(&p->kfd_processes, &cleanup_list);
	}
	mutex_unlock(&kfd_processes_mutex);

	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
		kfd_process_notifier_release_internal(p);

	/*
	 * Ensures that all outstanding free_notifier get called, triggering
	 * the release of the kfd_process struct.
	 */
	mmu_notifier_synchronize();
}

static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_mem = mem;
	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
		return;

	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
}

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (!KFD_IS_SOC15(dev))
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			return false;

		if (dev->noretry)
			return false;
	}

	return true;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->queues_paused = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	err = kfd_event_init_process(process);
	if (err)
		goto err_event_init;
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0) {
		err = -ENOSPC;
		goto err_alloc_pasid;
	}

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	/* Take a reference so that free_notifier does not start
	 * kfd_process_wq_release if mmu_notifier_get fails because of a
	 * pending signal.
	 */
	kref_get(&process->ref);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	kfd_unref_process(process);
	get_task_struct(process->lead_thread);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfd_event_free_process(process);
err_event_init:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev))
		return 0;

	qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					     GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
		 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
		 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			__set_bit(i, qpd->doorbell_bitmap);
			__set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				  qpd->doorbell_bitmap);
		}
	}

	return 0;
}
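
/*
 * Illustrative example (the values are hypothetical and vary by ASIC): if
 * firmware reserves non-CP doorbells 0x060-0x09f, the loop above marks those
 * bits as reserved plus their mirrors at the same indices shifted by
 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET, so user queues are never assigned a
 * doorbell that SDMA, IH or VCN may be using.
 */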

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							   struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;
	int retval = 0;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		goto err_free_pdd;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	pdd->user_gpu_id = dev->id;
	atomic64_set(&pdd->evict_duration_counter, 0);

	if (dev->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_PROC_CTX_SIZE,
						&pdd->proc_ctx_bo,
						&pdd->proc_ctx_gpu_addr,
						&pdd->proc_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate process context bo\n");
			goto err_free_pdd;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	p->pdds[p->n_pdds++] = pdd;

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
						     &p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	pdd->drm_priv = drm_file->private_data;
	atomic64_set(&pdd->tlb_seq, 0);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
	if (ret)
		goto err_set_pasid;

	pdd->drm_file = drm_file;

	return 0;

err_set_pasid:
	kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
	kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
	pdd->drm_priv = NULL;
	amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * Signal the runtime-pm system to auto resume and to prevent
	 * further runtime suspend once the device pdd is created,
	 * until the pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	/*
	 * make sure that runtime_usage counter is incremented just once
	 * per pdd
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
	}

	return ERR_PTR(err);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}
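
/*
 * Typical usage of the three handle helpers above (sketch; assumes the
 * process lock is held, as their comments require):
 *
 *	int handle = kfd_process_device_create_obj_handle(pdd, mem);
 *	...
 *	mem = kfd_process_device_translate_handle(pdd, handle);
 *	...
 *	kfd_process_device_remove_obj_handle(pdd, handle);
 */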

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
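 *
 * For example (illustrative): an eviction triggered by memory pressure and
 * one triggered by suspend may overlap; the per-device qpd.evicted count
 * tracks the nesting, and the queues only run again once every eviction has
 * been matched by a restore.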
 */
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
{
	int r = 0;
	int i;
	unsigned int n_evicted = 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
					     trigger);

		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		/* evict_process_queues returns -EIO if the HWS is hung or the
		 * ASIC is resetting. In that case we still mark all queues as
		 * evicted, to prevent them from being added back: their state
		 * has not actually been saved at this point.
		 */
		if (r && r != -EIO) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

int
kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
			    uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, the process can be evicted again while
	 * restore still has a few more steps to finish. So wait for any
	 * previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Setting last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
					 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						   get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

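	/*
	 * These pages are shared with the kernel: kfd_process_init_cwsr_apu()
	 * copies the CWSR trap-handler ISA into cwsr_kaddr after the vm_mmap()
	 * call that triggered this handler returns, and the user mapping set
	 * up below exposes the same pages at the trap-base address.
	 */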
	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		     | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	struct kfd_dev *dev = pdd->dev;

	/*
	 * It can be that we race and lose here, but that is extremely unlikely
	 * and the worst thing which could happen is that we flush the changes
	 * into the TLB once more which is harmless.
	 */
	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
		return;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
							 pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
						  pdd->process->pasid, type);
	}
}

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif
