// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/gpu_scheduler.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"

#define AMDXDNA_AUTOSUSPEND_DELAY	5000 /* milliseconds */

MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");

/*
 * Bind the driver based on the (vendor_id, device_id) pair, and later use
 * the (device_id, rev_id) pair as a key to select the device data. Devices
 * with the same device_id present a very similar interface to the host
 * driver.
 */
static const struct pci_device_id pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
	{0}
};

MODULE_DEVICE_TABLE(pci, pci_ids);

static const struct amdxdna_device_id amdxdna_ids[] = {
	{ 0x1502, 0x0, &dev_npu1_info },
	{ 0x17f0, 0x0, &dev_npu2_info },
	{ 0x17f0, 0x10, &dev_npu4_info },
	{ 0x17f0, 0x11, &dev_npu5_info },
	{ 0x17f0, 0x20, &dev_npu6_info },
	{0}
};

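/*
 * Per-client open: take a runtime PM reference so the device stays powered
 * while the file is open, then bind the process address space to the device
 * with IOMMU SVA and record the resulting PASID for this client.
 */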
static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_client *client;
	int ret;

	ret = pm_runtime_resume_and_get(ddev->dev);
	if (ret) {
		XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
		return ret;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client) {
		ret = -ENOMEM;
		goto put_rpm;
	}

	client->pid = pid_nr(rcu_access_pointer(filp->pid));
	client->xdna = xdna;

	client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
	if (IS_ERR(client->sva)) {
		ret = PTR_ERR(client->sva);
		XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
		goto failed;
	}
	client->pasid = iommu_sva_get_pasid(client->sva);
	if (client->pasid == IOMMU_PASID_INVALID) {
		XDNA_ERR(xdna, "SVA get pasid failed");
		ret = -ENODEV;
		goto unbind_sva;
	}
	mutex_init(&client->hwctx_lock);
	init_srcu_struct(&client->hwctx_srcu);
	xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
	mutex_init(&client->mm_lock);

	mutex_lock(&xdna->dev_lock);
	list_add_tail(&client->node, &xdna->client_list);
	mutex_unlock(&xdna->dev_lock);

	filp->driver_priv = client;
	client->filp = filp;

	XDNA_DBG(xdna, "pid %d opened", client->pid);
	return 0;

unbind_sva:
	iommu_sva_unbind_device(client->sva);
failed:
	kfree(client);
put_rpm:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return ret;
}

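/*
 * Postclose counterpart of amdxdna_drm_open(): destroy the per-client
 * bookkeeping, drop the heap BO reference if one was created, unbind SVA,
 * and release the runtime PM reference taken at open time.
 */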
static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);

	XDNA_DBG(xdna, "closing pid %d", client->pid);

	xa_destroy(&client->hwctx_xa);
	cleanup_srcu_struct(&client->hwctx_srcu);
	mutex_destroy(&client->hwctx_lock);
	mutex_destroy(&client->mm_lock);
	if (client->dev_heap)
		drm_gem_object_put(to_gobj(client->dev_heap));

	iommu_sva_unbind_device(client->sva);

	XDNA_DBG(xdna, "pid %d closed", client->pid);
	kfree(client);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
}

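/*
 * Called on every close(2) of the file: remove the client from the device
 * list and destroy its hardware contexts. The work is skipped entirely if
 * the device has already been unplugged (drm_dev_enter() fails).
 */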
static int amdxdna_flush(struct file *f, fl_owner_t id)
{
	struct drm_file *filp = f->private_data;
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = client->xdna;
	int idx;

	XDNA_DBG(xdna, "PID %d flushing...", client->pid);
	if (!drm_dev_enter(&xdna->ddev, &idx))
		return 0;

	mutex_lock(&xdna->dev_lock);
	list_del_init(&client->node);
	mutex_unlock(&xdna->dev_lock);
	amdxdna_hwctx_remove_all(client);

	drm_dev_exit(idx);
	return 0;
}

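/*
 * The query and configure ioctls below dispatch to device-generation
 * specific callbacks under dev_lock, returning -EOPNOTSUPP when a
 * generation does not implement the hook.
 */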
static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_get_info *args = data;
	int ret;

	if (!xdna->dev_info->ops->get_aie_info)
		return -EOPNOTSUPP;

	XDNA_DBG(xdna, "Request parameter %u", args->param);
	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->get_aie_info(client, args);
	mutex_unlock(&xdna->dev_lock);
	return ret;
}

static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_set_state *args = data;
	int ret;

	if (!xdna->dev_info->ops->set_aie_state)
		return -EOPNOTSUPP;

	XDNA_DBG(xdna, "Request parameter %u", args->param);
	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->set_aie_state(client, args);
	mutex_unlock(&xdna->dev_lock);

	return ret;
}

static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
	/* Context */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
	/* BO */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
	/* Execution */
	DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
	/* AIE hardware */
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};

static const struct file_operations amdxdna_fops = {
	.owner = THIS_MODULE,
	.open = accel_open,
	.release = drm_release,
	.flush = amdxdna_flush,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
	.mmap = drm_gem_mmap,
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

const struct drm_driver amdxdna_drm_drv = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
		DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.fops = &amdxdna_fops,
	.name = "amdxdna_accel_driver",
	.desc = "AMD XDNA DRM implementation",
	.open = amdxdna_drm_open,
	.postclose = amdxdna_drm_close,
	.ioctls = amdxdna_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),

	.gem_create_object = amdxdna_gem_create_object_cb,
	.gem_prime_import = amdxdna_gem_prime_import,
};

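/* Select device data by (device_id, rev_id), as described above pci_ids[]. */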
static const struct amdxdna_dev_info *
amdxdna_get_dev_info(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
		if (pdev->device == amdxdna_ids[i].device &&
		    pdev->revision == amdxdna_ids[i].revision)
			return amdxdna_ids[i].dev_info;
	}
	return NULL;
}

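/*
 * Probe order: allocate the DRM device, select per-generation device info,
 * initialize the hardware, create the sysfs attributes, enable runtime PM
 * with autosuspend, and finally register the DRM node. The error path
 * unwinds in reverse.
 */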
static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct amdxdna_dev *xdna;
	int ret;

	xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
	if (IS_ERR(xdna))
		return PTR_ERR(xdna);

	xdna->dev_info = amdxdna_get_dev_info(pdev);
	if (!xdna->dev_info)
		return -ENODEV;

	drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
	init_rwsem(&xdna->notifier_lock);
	INIT_LIST_HEAD(&xdna->client_list);
	pci_set_drvdata(pdev, xdna);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&xdna->notifier_lock);
		fs_reclaim_release(GFP_KERNEL);
	}

	xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
	if (!xdna->notifier_wq)
		return -ENOMEM;

	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->init(xdna);
	mutex_unlock(&xdna->dev_lock);
	if (ret) {
		XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
		goto destroy_notifier_wq;
	}

	ret = amdxdna_sysfs_init(xdna);
	if (ret) {
		XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
		goto failed_dev_fini;
	}

	pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);

	ret = drm_dev_register(&xdna->ddev, 0);
	if (ret) {
		XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
		pm_runtime_forbid(dev);
		goto failed_sysfs_fini;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;

failed_sysfs_fini:
	amdxdna_sysfs_fini(xdna);
failed_dev_fini:
	mutex_lock(&xdna->dev_lock);
	xdna->dev_info->ops->fini(xdna);
	mutex_unlock(&xdna->dev_lock);
destroy_notifier_wq:
	destroy_workqueue(xdna->notifier_wq);
	return ret;
}

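/*
 * Device removal: unplug the DRM device first so new ioctls fail, then
 * drain the client list. dev_lock is dropped around each call to
 * amdxdna_hwctx_remove_all(), so the list is re-read from the head on
 * every iteration.
 */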
static void amdxdna_remove(struct pci_dev *pdev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct amdxdna_client *client;

	destroy_workqueue(xdna->notifier_wq);

	pm_runtime_get_noresume(dev);
	pm_runtime_forbid(dev);

	drm_dev_unplug(&xdna->ddev);
	amdxdna_sysfs_fini(xdna);

	mutex_lock(&xdna->dev_lock);
	client = list_first_entry_or_null(&xdna->client_list,
					  struct amdxdna_client, node);
	while (client) {
		list_del_init(&client->node);
		mutex_unlock(&xdna->dev_lock);

		amdxdna_hwctx_remove_all(client);

		mutex_lock(&xdna->dev_lock);
		client = list_first_entry_or_null(&xdna->client_list,
						  struct amdxdna_client, node);
	}

	xdna->dev_info->ops->fini(xdna);
	mutex_unlock(&xdna->dev_lock);
}

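/* Callers of the *_nolock helpers below must hold xdna->dev_lock. */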
static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
{
	if (xdna->dev_info->ops->suspend)
		xdna->dev_info->ops->suspend(xdna);

	return 0;
}

static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
{
	if (xdna->dev_info->ops->resume)
		return xdna->dev_info->ops->resume(xdna);

	return 0;
}

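/* System sleep: quiesce every client's hardware contexts, then the device. */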
static int amdxdna_pmops_suspend(struct device *dev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
	struct amdxdna_client *client;

	mutex_lock(&xdna->dev_lock);
	list_for_each_entry(client, &xdna->client_list, node)
		amdxdna_hwctx_suspend(client);

	amdxdna_dev_suspend_nolock(xdna);
	mutex_unlock(&xdna->dev_lock);

	return 0;
}

static int amdxdna_pmops_resume(struct device *dev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
	struct amdxdna_client *client;
	int ret;

	XDNA_INFO(xdna, "firmware resuming...");
	mutex_lock(&xdna->dev_lock);
	ret = amdxdna_dev_resume_nolock(xdna);
	if (ret) {
		XDNA_ERR(xdna, "resume NPU firmware failed");
		mutex_unlock(&xdna->dev_lock);
		return ret;
	}

	XDNA_INFO(xdna, "hardware context resuming...");
	list_for_each_entry(client, &xdna->client_list, node)
		amdxdna_hwctx_resume(client);
	mutex_unlock(&xdna->dev_lock);

	return 0;
}

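/*
 * Runtime PM: triggered after AMDXDNA_AUTOSUSPEND_DELAY of inactivity.
 * Unlike system sleep, only the device is suspended here; the per-client
 * hardware contexts are not touched.
 */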
static int amdxdna_rpmops_suspend(struct device *dev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
	int ret;

	mutex_lock(&xdna->dev_lock);
	ret = amdxdna_dev_suspend_nolock(xdna);
	mutex_unlock(&xdna->dev_lock);

	XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
	return ret;
}

static int amdxdna_rpmops_resume(struct device *dev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
	int ret;

	mutex_lock(&xdna->dev_lock);
	ret = amdxdna_dev_resume_nolock(xdna);
	mutex_unlock(&xdna->dev_lock);

	XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
	return ret;
}

static const struct dev_pm_ops amdxdna_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
	RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
};


static struct pci_driver amdxdna_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pci_ids,
	.probe = amdxdna_probe,
	.remove = amdxdna_remove,
	.driver.pm = &amdxdna_pm_ops,
};

module_pci_driver(amdxdna_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_DESCRIPTION("amdxdna driver");