1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4 */
5
6#include <drm/amdxdna_accel.h>
7#include <drm/drm_accel.h>
8#include <drm/drm_drv.h>
9#include <drm/drm_gem.h>
10#include <drm/drm_gem_shmem_helper.h>
11#include <drm/drm_ioctl.h>
12#include <drm/drm_managed.h>
13#include <drm/gpu_scheduler.h>
14#include <linux/iommu.h>
15#include <linux/pci.h>
16
17#include "amdxdna_ctx.h"
18#include "amdxdna_gem.h"
19#include "amdxdna_pci_drv.h"
20#include "amdxdna_pm.h"
21
22MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
23MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
24MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
25MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
26
27/*
28 * 0.0: Initial version
29 * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
 * 0.2: Support getting last hardware error
31 * 0.3: Support firmware debug buffer
32 * 0.4: Support getting resource information
33 * 0.5: Support getting telemetry data
34 * 0.6: Support preemption
35 */
36#define AMDXDNA_DRIVER_MAJOR 0
37#define AMDXDNA_DRIVER_MINOR 6
38
39/*
 * Bind the driver based on the (vendor_id, device_id) pair and later use the
41 * (device_id, rev_id) pair as a key to select the devices. The devices with
42 * same device_id have very similar interface to host driver.
43 */
/* PCI IDs this driver binds to; per-device behavior is selected via amdxdna_ids. */
static const struct pci_device_id pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },	/* NPU1 */
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },	/* NPU2/4/5/6, distinguished by rev_id */
	{0}	/* sentinel */
};
49
50MODULE_DEVICE_TABLE(pci, pci_ids);
51
/*
 * Map of (device_id, rev_id) to per-device info, searched linearly by
 * amdxdna_get_dev_info(). The all-zero entry terminates the table.
 */
static const struct amdxdna_device_id amdxdna_ids[] = {
	{ 0x1502, 0x0, &dev_npu1_info },
	{ 0x17f0, 0x0, &dev_npu2_info },
	{ 0x17f0, 0x10, &dev_npu4_info },
	{ 0x17f0, 0x11, &dev_npu5_info },
	{ 0x17f0, 0x20, &dev_npu6_info },
	{0}	/* sentinel */
};
60
61static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
62{
63 struct amdxdna_dev *xdna = to_xdna_dev(ddev);
64 struct amdxdna_client *client;
65 int ret;
66
67 client = kzalloc(sizeof(*client), GFP_KERNEL);
68 if (!client)
69 return -ENOMEM;
70
71 client->pid = pid_nr(rcu_access_pointer(filp->pid));
72 client->xdna = xdna;
73
74 client->sva = iommu_sva_bind_device(dev: xdna->ddev.dev, current->mm);
75 if (IS_ERR(ptr: client->sva)) {
76 ret = PTR_ERR(ptr: client->sva);
77 XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
78 goto failed;
79 }
80 client->pasid = iommu_sva_get_pasid(handle: client->sva);
81 if (client->pasid == IOMMU_PASID_INVALID) {
82 XDNA_ERR(xdna, "SVA get pasid failed");
83 ret = -ENODEV;
84 goto unbind_sva;
85 }
86 init_srcu_struct(&client->hwctx_srcu);
87 xa_init_flags(xa: &client->hwctx_xa, XA_FLAGS_ALLOC);
88 mutex_init(&client->mm_lock);
89
90 mutex_lock(&xdna->dev_lock);
91 list_add_tail(new: &client->node, head: &xdna->client_list);
92 mutex_unlock(lock: &xdna->dev_lock);
93
94 filp->driver_priv = client;
95 client->filp = filp;
96
97 XDNA_DBG(xdna, "pid %d opened", client->pid);
98 return 0;
99
100unbind_sva:
101 iommu_sva_unbind_device(handle: client->sva);
102failed:
103 kfree(objp: client);
104
105 return ret;
106}
107
108static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
109{
110 struct amdxdna_client *client = filp->driver_priv;
111 struct amdxdna_dev *xdna = to_xdna_dev(ddev);
112
113 XDNA_DBG(xdna, "closing pid %d", client->pid);
114
115 xa_destroy(&client->hwctx_xa);
116 cleanup_srcu_struct(ssp: &client->hwctx_srcu);
117 mutex_destroy(lock: &client->mm_lock);
118 if (client->dev_heap)
119 drm_gem_object_put(to_gobj(client->dev_heap));
120
121 iommu_sva_unbind_device(handle: client->sva);
122
123 XDNA_DBG(xdna, "pid %d closed", client->pid);
124 kfree(objp: client);
125}
126
127static int amdxdna_flush(struct file *f, fl_owner_t id)
128{
129 struct drm_file *filp = f->private_data;
130 struct amdxdna_client *client = filp->driver_priv;
131 struct amdxdna_dev *xdna = client->xdna;
132 int idx;
133
134 XDNA_DBG(xdna, "PID %d flushing...", client->pid);
135 if (!drm_dev_enter(dev: &xdna->ddev, idx: &idx))
136 return 0;
137
138 mutex_lock(&xdna->dev_lock);
139 list_del_init(entry: &client->node);
140 amdxdna_hwctx_remove_all(client);
141 mutex_unlock(lock: &xdna->dev_lock);
142
143 drm_dev_exit(idx);
144 return 0;
145}
146
147static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
148{
149 struct amdxdna_client *client = filp->driver_priv;
150 struct amdxdna_dev *xdna = to_xdna_dev(dev);
151 struct amdxdna_drm_get_info *args = data;
152 int ret;
153
154 if (!xdna->dev_info->ops->get_aie_info)
155 return -EOPNOTSUPP;
156
157 XDNA_DBG(xdna, "Request parameter %u", args->param);
158 mutex_lock(&xdna->dev_lock);
159 ret = xdna->dev_info->ops->get_aie_info(client, args);
160 mutex_unlock(lock: &xdna->dev_lock);
161 return ret;
162}
163
164static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
165 struct drm_file *filp)
166{
167 struct amdxdna_client *client = filp->driver_priv;
168 struct amdxdna_dev *xdna = to_xdna_dev(dev);
169 struct amdxdna_drm_get_array *args = data;
170
171 if (!xdna->dev_info->ops->get_array)
172 return -EOPNOTSUPP;
173
174 if (args->pad || !args->num_element || !args->element_size)
175 return -EINVAL;
176
177 guard(mutex)(T: &xdna->dev_lock);
178 return xdna->dev_info->ops->get_array(client, args);
179}
180
181static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
182{
183 struct amdxdna_client *client = filp->driver_priv;
184 struct amdxdna_dev *xdna = to_xdna_dev(dev);
185 struct amdxdna_drm_set_state *args = data;
186 int ret;
187
188 if (!xdna->dev_info->ops->set_aie_state)
189 return -EOPNOTSUPP;
190
191 XDNA_DBG(xdna, "Request parameter %u", args->param);
192 mutex_lock(&xdna->dev_lock);
193 ret = xdna->dev_info->ops->set_aie_state(client, args);
194 mutex_unlock(lock: &xdna->dev_lock);
195
196 return ret;
197}
198
/* IOCTLs exposed on the accel node; only SET_STATE is restricted to root. */
static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
	/* Context */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
	/* BO */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
	/* Execution */
	DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
	/* AIE hardware */
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
215
/*
 * File operations for the accel device node. Most handlers are the stock
 * DRM ones; amdxdna_flush is the driver-specific per-process teardown.
 */
static const struct file_operations amdxdna_fops = {
	.owner = THIS_MODULE,
	.open = accel_open,
	.release = drm_release,
	.flush = amdxdna_flush,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
	.mmap = drm_gem_mmap,
	.fop_flags = FOP_UNSIGNED_OFFSET,
};
229
/* DRM driver description: GEM compute-accel device with syncobj support. */
const struct drm_driver amdxdna_drm_drv = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
		DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.fops = &amdxdna_fops,
	.name = "amdxdna_accel_driver",
	.desc = "AMD XDNA DRM implementation",
	.major = AMDXDNA_DRIVER_MAJOR,
	.minor = AMDXDNA_DRIVER_MINOR,
	.open = amdxdna_drm_open,
	.postclose = amdxdna_drm_close,
	.ioctls = amdxdna_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),

	.gem_create_object = amdxdna_gem_create_object_cb,
	.gem_prime_import = amdxdna_gem_prime_import,
};
246
247static const struct amdxdna_dev_info *
248amdxdna_get_dev_info(struct pci_dev *pdev)
249{
250 int i;
251
252 for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
253 if (pdev->device == amdxdna_ids[i].device &&
254 pdev->revision == amdxdna_ids[i].revision)
255 return amdxdna_ids[i].dev_info;
256 }
257 return NULL;
258}
259
260static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
261{
262 struct device *dev = &pdev->dev;
263 struct amdxdna_dev *xdna;
264 int ret;
265
266 xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
267 if (IS_ERR(ptr: xdna))
268 return PTR_ERR(ptr: xdna);
269
270 xdna->dev_info = amdxdna_get_dev_info(pdev);
271 if (!xdna->dev_info)
272 return -ENODEV;
273
274 drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
275 init_rwsem(&xdna->notifier_lock);
276 INIT_LIST_HEAD(list: &xdna->client_list);
277 pci_set_drvdata(pdev, data: xdna);
278
279 if (IS_ENABLED(CONFIG_LOCKDEP)) {
280 fs_reclaim_acquire(GFP_KERNEL);
281 might_lock(&xdna->notifier_lock);
282 fs_reclaim_release(GFP_KERNEL);
283 }
284
285 xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
286 if (!xdna->notifier_wq)
287 return -ENOMEM;
288
289 mutex_lock(&xdna->dev_lock);
290 ret = xdna->dev_info->ops->init(xdna);
291 mutex_unlock(lock: &xdna->dev_lock);
292 if (ret) {
293 XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
294 goto destroy_notifier_wq;
295 }
296
297 ret = amdxdna_sysfs_init(xdna);
298 if (ret) {
299 XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
300 goto failed_dev_fini;
301 }
302
303 ret = drm_dev_register(dev: &xdna->ddev, flags: 0);
304 if (ret) {
305 XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
306 goto failed_sysfs_fini;
307 }
308
309 return 0;
310
311failed_sysfs_fini:
312 amdxdna_sysfs_fini(xdna);
313failed_dev_fini:
314 mutex_lock(&xdna->dev_lock);
315 xdna->dev_info->ops->fini(xdna);
316 mutex_unlock(lock: &xdna->dev_lock);
317destroy_notifier_wq:
318 destroy_workqueue(wq: xdna->notifier_wq);
319 return ret;
320}
321
322static void amdxdna_remove(struct pci_dev *pdev)
323{
324 struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
325 struct amdxdna_client *client;
326
327 destroy_workqueue(wq: xdna->notifier_wq);
328
329 drm_dev_unplug(dev: &xdna->ddev);
330 amdxdna_sysfs_fini(xdna);
331
332 mutex_lock(&xdna->dev_lock);
333 client = list_first_entry_or_null(&xdna->client_list,
334 struct amdxdna_client, node);
335 while (client) {
336 list_del_init(entry: &client->node);
337 amdxdna_hwctx_remove_all(client);
338
339 client = list_first_entry_or_null(&xdna->client_list,
340 struct amdxdna_client, node);
341 }
342
343 xdna->dev_info->ops->fini(xdna);
344 mutex_unlock(lock: &xdna->dev_lock);
345}
346
/* System sleep and runtime PM both route to the same suspend/resume pair. */
static const struct dev_pm_ops amdxdna_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
	RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
};
351
/* PCI driver glue registered by module_pci_driver() below. */
static struct pci_driver amdxdna_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pci_ids,
	.probe = amdxdna_probe,
	.remove = amdxdna_remove,
	.driver.pm = &amdxdna_pm_ops,
};
359
360module_pci_driver(amdxdna_pci_driver);
361
362MODULE_LICENSE("GPL");
363MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
364MODULE_DESCRIPTION("amdxdna driver");
365

source code of linux/drivers/accel/amdxdna/amdxdna_pci_drv.c