// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
			   __stringify(DRM_IVPU_DRIVER_MINOR)
#endif

static const struct drm_driver driver;

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
{
	struct ivpu_file_priv *file_priv;

	xa_lock_irq(&vdev->context_xa);
	file_priv = xa_load(&vdev->context_xa, id);
	/* file_priv may still be in context_xa during file_priv_release() */
	if (file_priv && !kref_get_unless_zero(&file_priv->ref))
		file_priv = NULL;
	xa_unlock_irq(&vdev->context_xa);

	if (file_priv)
		ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
			 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);

	ivpu_cmdq_release_all(file_priv);
	ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
	ivpu_jsm_context_release(vdev, file_priv->ctx.id);
	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
	drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	drm_WARN_ON(&vdev->drm, !file_priv);

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 0;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
	int ret;

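	/*
	 * Take a runtime PM reference only if the device is already powered
	 * up (ivpu_rpm_get_if_active() returns a positive value); when the
	 * device is suspended, report a clock rate of 0 instead of waking it.
	 */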
	ret = ivpu_rpm_get_if_active(vdev);
	if (ret < 0)
		return ret;

	*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;

	if (ret)
		ivpu_rpm_put(vdev);

	return 0;
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		ret = ivpu_get_core_clock_rate(vdev, &args->value);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
		args->value = file_priv->priority;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
		if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
			file_priv->priority = args->value;
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	void *old;
	int ret;

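	/*
	 * Reserve a context id up front with a NULL entry; the file_priv is
	 * published to the xarray only once the MMU context behind it has
	 * been fully initialized.
	 */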
	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		return ret;
	}

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_xa_erase;
	}

	file_priv->vdev = vdev;
	file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_mutex_destroy;

	old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
		goto err_ctx_fini;
	}

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	file->driver_priv = file_priv;
	return 0;

err_ctx_fini:
	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
err_mutex_destroy:
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};

static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
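	/*
	 * The device IRQ is still disabled at this point (it is enabled
	 * later in ivpu_boot()), so poll for the boot message by calling
	 * the IPC IRQ handler directly until the message arrives or the
	 * boot timeout expires.
	 */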
	while (1) {
		ret = ivpu_ipc_irq_handler(vdev);
		if (ret)
			break;
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
	else
		ivpu_hw_diagnose_failure(vdev);

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		return ret;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);
	return 0;
}

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	ivpu_prepare_for_reset(vdev);

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,
	.gem_prime_import = ivpu_gem_prime_import,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRM_IVPU_DRIVER_MAJOR,
	.minor = DRM_IVPU_DRIVER_MINOR,
};

static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	vdev->irq = pci_irq_vector(pdev, 0);

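	/*
	 * IRQF_NO_AUTOEN leaves the interrupt disabled after the request;
	 * ivpu_boot() enables it only once the firmware has been started.
	 */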
	ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* VPU 37XX does not require 10ms D3hot delay */
	if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
		pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

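	/*
	 * Pick the HW ops and DMA address width for this generation; the
	 * DMA mask derived from dma_bits is applied in ivpu_pci_init().
	 */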
	if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
		vdev->hw->ops = &ivpu_hw_40xx_ops;
		vdev->hw->dma_bits = 48;
	} else {
		vdev->hw->ops = &ivpu_hw_37xx_ops;
		vdev->hw->dma_bits = 38;
	}

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
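	/* Give submitted_jobs_xa's internal lock its own lockdep class */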
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_info_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_power_down;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_job_done_thread_init(vdev);
	if (ret)
		goto err_ipc_fini;

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_job_done_thread_fini;

	ivpu_pm_enable(vdev);

	return 0;

err_job_done_thread_fini:
	ivpu_job_done_thread_fini(vdev);
err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_power_down:
	ivpu_hw_power_down(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_pm_disable(vdev);
	ivpu_shutdown(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
	ivpu_job_done_thread_fini(vdev);
	ivpu_pm_cancel_recovery(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

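/*
 * Device state is allocated with devm/drmm managed helpers, so it is
 * released automatically when the PCI device unbinds and the DRM device
 * is freed.
 */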
static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);