// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>

#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_timesync.h"

MODULE_IMPORT_NS(DMA_BUF);

#define PCI_DEV_AIC100			0xa100
#define QAIC_NAME			"qaic"
#define QAIC_DESC			"Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR			5
#define CNTL_MINOR			0

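/*
 * datapath_polling is set at module load, e.g. "modprobe qaic datapath_polling=1".
 * Permissions 0400 expose it read-only to root under
 * /sys/module/qaic/parameters/; it cannot be toggled at runtime.
 */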
bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);

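/*
 * DRM-managed workqueue helpers: qaicm_wq_init() allocates an unbound
 * workqueue and registers qaicm_wq_release() as a drmm action, so the
 * workqueue is destroyed automatically when the drm_device is released.
 */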
static void qaicm_wq_release(struct drm_device *dev, void *res)
{
	struct workqueue_struct *wq = res;

	destroy_workqueue(wq);
}

static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
	if (ret)
		return ERR_PTR(ret);

	return wq;
}

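/*
 * Same pattern for SRCU: pair init_srcu_struct() with a drmm action that
 * runs cleanup_srcu_struct() when the drm_device is released.
 */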
static void qaicm_srcu_release(struct drm_device *dev, void *res)
{
	struct srcu_struct *lock = res;

	cleanup_srcu_struct(lock);
}

static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
{
	int ret;

	ret = init_srcu_struct(lock);
	if (ret)
		return ret;

	return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
}

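/* drmm action: clear the PCI drvdata once the drm_device is going away. */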
static void qaicm_pci_release(struct drm_device *dev, void *res)
{
	struct qaic_device *qdev = to_qaic_device(dev);

	pci_set_drvdata(qdev->pdev, NULL);
}

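/* kref release callback: tears down the per-user state built in qaic_open(). */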
static void free_usr(struct kref *kref)
{
	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);

	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
	kfree(usr);
}

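/*
 * DRM open callback. Allocates a qaic_user, assigns it an IDA handle, and
 * links it into the device's user list, all inside a dev_lock SRCU read
 * section so a concurrent reset cannot pull the device out from under us.
 */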
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
	struct qaic_device *qdev = qddev->qdev;
	struct qaic_user *usr;
	int rcu_id;
	int ret;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto dev_unlock;
	}

	usr = kmalloc(sizeof(*usr), GFP_KERNEL);
	if (!usr) {
		ret = -ENOMEM;
		goto dev_unlock;
	}

	usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
	if (usr->handle < 0) {
		ret = usr->handle;
		goto free_usr;
	}
	usr->qddev = qddev;
	atomic_set(&usr->chunk_id, 0);
	init_srcu_struct(&usr->qddev_lock);
	kref_init(&usr->ref_count);

	ret = mutex_lock_interruptible(&qddev->users_mutex);
	if (ret)
		goto cleanup_usr;

	list_add(&usr->node, &qddev->users);
	mutex_unlock(&qddev->users_mutex);

	file->driver_priv = usr;

	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return 0;

cleanup_usr:
	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
free_usr:
	kfree(usr);
dev_unlock:
	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return ret;
}

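/*
 * DRM postclose callback. If the device is still online, releases the
 * user's control-channel state and every DBC the user owned, then unlinks
 * the user and drops the reference taken in qaic_open().
 */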
static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_user *usr = file->driver_priv;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	int qdev_rcu_id;
	int usr_rcu_id;
	int i;

	qddev = usr->qddev;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (qddev) {
		qdev = qddev->qdev;
		qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
		if (qdev->dev_state == QAIC_ONLINE) {
			qaic_release_usr(qdev, usr);
			for (i = 0; i < qdev->num_dbc; ++i)
				if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
					release_dbc(qdev, i);
		}
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);

		mutex_lock(&qddev->users_mutex);
		if (!list_empty(&usr->node))
			list_del_init(&usr->node);
		mutex_unlock(&qddev->users_mutex);
	}

	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	kref_put(&usr->ref_count, free_usr);

	file->driver_priv = NULL;
}

DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);

static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};
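/*
 * Userspace reaches these through the accel node (/dev/accel/accelN) using
 * the DRM_IOCTL_QAIC_* request codes from uapi/drm/qaic_accel.h. A minimal
 * hypothetical sketch, error handling omitted:
 *
 *	int fd = open("/dev/accel/accel0", O_RDWR);
 *	struct qaic_create_bo create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_QAIC_CREATE_BO, &create);
 */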

static const struct drm_driver qaic_accel_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.name			= QAIC_NAME,
	.desc			= QAIC_DESC,
	.date			= "20190618",

	.fops			= &qaic_accel_fops,
	.open			= qaic_open,
	.postclose		= qaic_postclose,

	.ioctls			= qaic_drm_ioctls,
	.num_ioctls		= ARRAY_SIZE(qaic_drm_ioctls),
	.gem_prime_import	= qaic_gem_prime_import,
};

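/*
 * Registers the drm_device with userspace. Partition IDs other than
 * QAIC_NO_PARTITION are rejected until a uapi for partitioning is settled.
 */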
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	int ret;

	/* Hold off implementing partitions until the uapi is determined */
	if (partition_id != QAIC_NO_PARTITION)
		return -EINVAL;

	qddev->partition_id = partition_id;

	ret = drm_dev_register(drm, 0);
	if (ret)
		pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);

	return ret;
}

static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	struct qaic_user *usr;

	drm_dev_unregister(drm);
	qddev->partition_id = 0;
	/*
	 * Existing users get unresolvable errors till they close FDs.
	 * Need to sync carefully with users calling close(). The
	 * list of users can be modified elsewhere when the lock isn't
	 * held here, but sync'ing the srcu with the mutex held
	 * could deadlock. Grab the mutex so that the list will be
	 * unmodified. The user we get will exist as long as the
	 * lock is held. Signal that the qddev is going away, and
	 * grab a reference to the user so they don't go away for
	 * synchronize_srcu(). Then release the mutex to avoid
	 * deadlock and make sure the user has observed the signal.
	 * With the lock released, we cannot maintain any state of the
	 * user list.
	 */
	mutex_lock(&qddev->users_mutex);
	while (!list_empty(&qddev->users)) {
		usr = list_first_entry(&qddev->users, struct qaic_user, node);
		list_del_init(&usr->node);
		kref_get(&usr->ref_count);
		usr->qddev = NULL;
		mutex_unlock(&qddev->users_mutex);
		synchronize_srcu(&usr->qddev_lock);
		kref_put(&usr->ref_count, free_usr);
		mutex_lock(&qddev->users_mutex);
	}
	mutex_unlock(&qddev->users_mutex);
}

static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	u16 major = -1, minor = -1;
	struct qaic_device *qdev;
	int ret;

	/*
	 * Invoking this function indicates that the control channel to the
	 * device is available. We use that as a signal to indicate that
	 * the device side firmware has booted. The device side firmware
	 * manages the device resources, so we need to communicate with it
	 * via the control channel in order to utilize the device. Therefore
	 * we wait until this signal to create the drm dev that userspace will
	 * use to control the device, because without the device side firmware,
	 * userspace can't do anything useful.
	 */

	qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));

	dev_set_drvdata(&mhi_dev->dev, qdev);
	qdev->cntl_ch = mhi_dev;

	ret = qaic_control_open(qdev);
	if (ret) {
		pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
		return ret;
	}

	qdev->dev_state = QAIC_BOOT;
	ret = get_cntl_version(qdev, NULL, &major, &minor);
	if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
		pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
			__func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
		ret = -EINVAL;
		goto close_control;
	}
	qdev->dev_state = QAIC_ONLINE;
	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);

	return ret;

close_control:
	qaic_control_close(qdev);
	return ret;
}

static void qaic_mhi_remove(struct mhi_device *mhi_dev)
{
/* This is redundant since we have already observed the device crash */
}

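/*
 * Marks the device offline, emits a KOBJ_OFFLINE uevent, and wakes every
 * waiter (control channel and all DBCs) so nothing sits in a wait loop
 * until its timeout while the device is being reset.
 */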
static void qaic_notify_reset(struct qaic_device *qdev)
{
	int i;

	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
	qdev->dev_state = QAIC_OFFLINE;
	/* wake up any waiters to avoid waiting for timeouts at sync */
	wake_all_cntl(qdev);
	for (i = 0; i < qdev->num_dbc; ++i)
		wakeup_dbc(qdev, i);
	synchronize_srcu(&qdev->dev_lock);
}

void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
	int i;

	qaic_notify_reset(qdev);

	/* start tearing things down */
	for (i = 0; i < qdev->num_dbc; ++i)
		release_dbc(qdev, i);
}

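/*
 * Allocates and wires up the qaic_device and its drm_device. Everything
 * here is devm/drmm managed, so the error paths simply return NULL and let
 * the managed framework unwind whatever was already set up.
 */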
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	struct drm_device *drm;
	int i, ret;

	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return NULL;

	qdev->dev_state = QAIC_OFFLINE;
	if (id->device == PCI_DEV_AIC100) {
		qdev->num_dbc = 16;
		qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
		if (!qdev->dbc)
			return NULL;
	}

	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
	if (IS_ERR(qddev))
		return NULL;

	drm = to_drm(qddev);
	pci_set_drvdata(pdev, qdev);

	ret = drmm_mutex_init(drm, &qddev->users_mutex);
	if (ret)
		return NULL;
	ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
	if (ret)
		return NULL;

	qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
	if (IS_ERR(qdev->cntl_wq))
		return NULL;
	qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
	if (IS_ERR(qdev->qts_wq))
		return NULL;

	ret = qaicm_srcu_init(drm, &qdev->dev_lock);
	if (ret)
		return NULL;

	qdev->qddev = qddev;
	qdev->pdev = pdev;
	qddev->qdev = qdev;

	INIT_LIST_HEAD(&qdev->cntl_xfer_list);
	INIT_LIST_HEAD(&qddev->users);

	for (i = 0; i < qdev->num_dbc; ++i) {
		spin_lock_init(&qdev->dbc[i].xfer_lock);
		qdev->dbc[i].qdev = qdev;
		qdev->dbc[i].id = i;
		INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
		ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
		if (ret)
			return NULL;
		init_waitqueue_head(&qdev->dbc[i].dbc_release);
		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
	}

	return qdev;
}

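/*
 * PCI bring-up: expects BARs 0, 2, and 4 (BAR 0 carries the MHI registers
 * and BAR 2 the DBC registers; BAR 4 is not mapped here), enables 64-bit
 * DMA, and raises the DMA segment size limit to UINT_MAX.
 */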
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int bars;
	int ret;

	bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* make sure the device has the expected BARs */
	if (bars != (BIT(0) | BIT(2) | BIT(4))) {
		pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
			__func__, bars);
		return -EINVAL;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	if (ret)
		return ret;

	qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
	if (IS_ERR(qdev->bar_0))
		return PTR_ERR(qdev->bar_0);

	qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
	if (IS_ERR(qdev->bar_2))
		return PTR_ERR(qdev->bar_2);

	/* Managed release since we use pcim_enable_device above */
	pci_set_master(pdev);

	return 0;
}

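/*
 * MSI setup. The preferred layout is 32 vectors: vector 0 for MHI and
 * vectors 1-16 for the DBCs. If 32 cannot be allocated, fall back to a
 * single shared vector for everything (see the comment below). Returns the
 * MHI IRQ number on success.
 */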
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int mhi_irq;
	int ret;
	int i;

	/* Managed release since we use pcim_enable_device */
	ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
	if (ret == -ENOSPC) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (ret < 0)
			return ret;

		/*
		 * Operate in one MSI mode. All interrupts will be directed to
		 * MSI0; every interrupt will wake up all the interrupt handlers
		 * (MHI and DBC[0-15]). Since the interrupt is now shared, it is
		 * not disabled during DBC threaded handler, but only one thread
		 * will be allowed to run per DBC, so while it can be
		 * interrupted, it shouldn't race with itself.
		 */
		qdev->single_msi = true;
		pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
	} else if (ret < 0) {
		return ret;
	}

	mhi_irq = pci_irq_vector(pdev, 0);
	if (mhi_irq < 0)
		return mhi_irq;

	for (i = 0; i < qdev->num_dbc; ++i) {
		ret = devm_request_threaded_irq(&pdev->dev,
						pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
						dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
						"qaic_dbc", &qdev->dbc[i]);
		if (ret)
			return ret;

		if (datapath_polling) {
			qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
			if (!qdev->single_msi)
				disable_irq_nosync(qdev->dbc[i].irq);
			INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
		}
	}

	return mhi_irq;
}

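/*
 * PCI probe: allocate the device state, map the BARs, set up MSIs, register
 * the drm_device, then hand BAR 0 and the MHI IRQ to the MHI stack. The MHI
 * controller is registered last; qaic_mhi_probe() will mark the device
 * QAIC_ONLINE once firmware brings up the control channel.
 */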
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qaic_device *qdev;
	int mhi_irq;
	int ret;
	int i;

	qdev = create_qdev(pdev, id);
	if (!qdev)
		return -ENOMEM;

	ret = init_pci(qdev, pdev);
	if (ret)
		return ret;

	for (i = 0; i < qdev->num_dbc; ++i)
		qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);

	mhi_irq = init_msi(qdev, pdev);
	if (mhi_irq < 0)
		return mhi_irq;

	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
	if (ret)
		return ret;

	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
						       qdev->single_msi);
	if (IS_ERR(qdev->mhi_cntrl)) {
		ret = PTR_ERR(qdev->mhi_cntrl);
		qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
		return ret;
	}

	return 0;
}

static void qaic_pci_remove(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	if (!qdev)
		return;

	qaic_dev_reset_clean_local_state(qdev);
	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}

static void qaic_pci_shutdown(struct pci_dev *pdev)
{
	/* see qaic_exit for what link_up is doing */
	link_up = true;
	qaic_pci_remove(pdev);
}

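/*
 * AER/reset hooks: always ask the PCI core for a reset on an error, quiesce
 * local state before the reset, and re-arm MHI afterwards.
 */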
static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

static void qaic_pci_reset_prepare(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_notify_reset(qdev);
	qaic_mhi_start_reset(qdev->mhi_cntrl);
	qaic_dev_reset_clean_local_state(qdev);
}

static void qaic_pci_reset_done(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_mhi_reset_done(qdev->mhi_cntrl);
}

static const struct mhi_device_id qaic_mhi_match_table[] = {
	{ .chan = "QAIC_CONTROL", },
	{},
};

static struct mhi_driver qaic_mhi_driver = {
	.id_table = qaic_mhi_match_table,
	.remove = qaic_mhi_remove,
	.probe = qaic_mhi_probe,
	.ul_xfer_cb = qaic_mhi_ul_xfer_cb,
	.dl_xfer_cb = qaic_mhi_dl_xfer_cb,
	.driver = {
		.name = "qaic_mhi",
	},
};

static const struct pci_device_id qaic_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
	{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);

static const struct pci_error_handlers qaic_pci_err_handler = {
	.error_detected = qaic_pci_error_detected,
	.reset_prepare = qaic_pci_reset_prepare,
	.reset_done = qaic_pci_reset_done,
};

static struct pci_driver qaic_pci_driver = {
	.name = QAIC_NAME,
	.id_table = qaic_ids,
	.probe = qaic_pci_probe,
	.remove = qaic_pci_remove,
	.shutdown = qaic_pci_shutdown,
	.err_handler = &qaic_pci_err_handler,
};

static int __init qaic_init(void)
{
	int ret;

	ret = pci_register_driver(&qaic_pci_driver);
	if (ret) {
		pr_debug("qaic: pci_register_driver failed %d\n", ret);
		return ret;
	}

	ret = mhi_driver_register(&qaic_mhi_driver);
	if (ret) {
		pr_debug("qaic: mhi_driver_register failed %d\n", ret);
		goto free_pci;
	}

	ret = qaic_timesync_init();
	if (ret)
		pr_debug("qaic: qaic_timesync_init failed %d\n", ret);

	return 0;

free_pci:
	pci_unregister_driver(&qaic_pci_driver);
	return ret;
}

static void __exit qaic_exit(void)
{
	/*
	 * We assume that qaic_pci_remove() is called due to a hotplug event
	 * which would mean that the link is down, and thus
	 * qaic_mhi_free_controller() should not try to access the device during
	 * cleanup.
	 * We call pci_unregister_driver() below, which also triggers
	 * qaic_pci_remove(), but since this is module exit, we expect the link
	 * to the device to be up, in which case qaic_mhi_free_controller()
	 * should try to access the device during cleanup to put the device in
	 * a sane state.
	 * For that reason, we set link_up here to let qaic_mhi_free_controller
	 * know the expected link state. Since the module is going to be
	 * removed at the end of this, we don't need to worry about
	 * reinitializing the link_up state after the cleanup is done.
	 */
	link_up = true;
	qaic_timesync_deinit();
	mhi_driver_unregister(&qaic_mhi_driver);
	pci_unregister_driver(&qaic_pci_driver);
}

module_init(qaic_init);
module_exit(qaic_exit);

MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
MODULE_LICENSE("GPL");