1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright (C) 2020-2023 Intel Corporation |
4 | */ |
5 | |
6 | #ifndef __IVPU_DRV_H__ |
7 | #define __IVPU_DRV_H__ |
8 | |
9 | #include <drm/drm_device.h> |
10 | #include <drm/drm_drv.h> |
11 | #include <drm/drm_managed.h> |
12 | #include <drm/drm_mm.h> |
13 | #include <drm/drm_print.h> |
14 | |
15 | #include <linux/pci.h> |
16 | #include <linux/xarray.h> |
17 | #include <uapi/drm/ivpu_accel.h> |
18 | |
19 | #include "ivpu_mmu_context.h" |
20 | |
21 | #define DRIVER_NAME "intel_vpu" |
22 | #define DRIVER_DESC "Driver for Intel Versatile Processing Unit (VPU)" |
23 | #define DRIVER_DATE "20230117" |
24 | |
25 | #define PCI_DEVICE_ID_MTL 0x7d1d |
26 | #define PCI_DEVICE_ID_ARL 0xad1d |
27 | #define PCI_DEVICE_ID_LNL 0x643e |
28 | |
29 | #define IVPU_HW_37XX 37 |
30 | #define IVPU_HW_40XX 40 |
31 | |
32 | #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 |
33 | /* SSID 1 is used by the VPU to represent reserved context */ |
34 | #define IVPU_RESERVED_CONTEXT_MMU_SSID 1 |
35 | #define IVPU_USER_CONTEXT_MIN_SSID 2 |
36 | #define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63) |
37 | |
38 | #define IVPU_NUM_ENGINES 2 |
39 | |
40 | #define IVPU_PLATFORM_SILICON 0 |
41 | #define IVPU_PLATFORM_SIMICS 2 |
42 | #define IVPU_PLATFORM_FPGA 3 |
43 | #define IVPU_PLATFORM_INVALID 8 |
44 | |
45 | #define IVPU_DBG_REG BIT(0) |
46 | #define IVPU_DBG_IRQ BIT(1) |
47 | #define IVPU_DBG_MMU BIT(2) |
48 | #define IVPU_DBG_FILE BIT(3) |
49 | #define IVPU_DBG_MISC BIT(4) |
50 | #define IVPU_DBG_FW_BOOT BIT(5) |
51 | #define IVPU_DBG_PM BIT(6) |
52 | #define IVPU_DBG_IPC BIT(7) |
53 | #define IVPU_DBG_BO BIT(8) |
54 | #define IVPU_DBG_JOB BIT(9) |
55 | #define IVPU_DBG_JSM BIT(10) |
56 | #define IVPU_DBG_KREF BIT(11) |
57 | #define IVPU_DBG_RPM BIT(12) |
58 | |
/* Error/warning log helpers; each message is prefixed with the calling function name */
#define ivpu_err(vdev, fmt, ...) \
	drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)

#define ivpu_err_ratelimited(vdev, fmt, ...) \
	drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)

#define ivpu_warn(vdev, fmt, ...) \
	drm_warn(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
67 | |
/*
 * Rate-limited warning. Note: must expand to drm_warn_ratelimited(), not
 * drm_err_ratelimited(), so the message carries warning (not error) severity,
 * consistent with the ivpu_warn()/ivpu_err() pairing above.
 */
#define ivpu_warn_ratelimited(vdev, fmt, ...) \
	drm_warn_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
70 | |
#define ivpu_info(vdev, fmt, ...) drm_info(&(vdev)->drm, fmt, ##__VA_ARGS__)

/*
 * Debug logging gated by the ivpu_dbg_mask module parameter: 'type' names one
 * of the IVPU_DBG_* bits (REG, IRQ, MMU, ...) and is also stringified into the
 * "[TYPE]" prefix of the message.
 */
#define ivpu_dbg(vdev, type, fmt, args...) do { \
	if (unlikely(IVPU_DBG_##type & ivpu_dbg_mask)) \
		dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##args); \
} while (0)
77 | |
/* Test whether the named workaround flag is set in vdev->wa (expects 'vdev' in scope) */
#define IVPU_WA(wa_name) (vdev->wa.wa_name)

/* Emit a MISC-level debug line for each workaround that is active */
#define IVPU_PRINT_WA(wa_name) do { \
	if (IVPU_WA(wa_name)) \
		ivpu_dbg(vdev, MISC, "Using WA: " #wa_name "\n"); \
} while (0)
84 | |
/*
 * Device-specific workaround flags, queried via IVPU_WA() and reported via
 * IVPU_PRINT_WA(). Presumably populated per-generation during HW setup —
 * confirm against the HW init code.
 */
struct ivpu_wa_table {
	bool punit_disabled;
	bool clear_runtime_mem;
	bool d3hot_after_power_off;
	bool interrupt_clear_with_0;
	bool disable_clock_relinquish;
};
92 | |
93 | struct ivpu_hw_info; |
94 | struct ivpu_mmu_info; |
95 | struct ivpu_fw_info; |
96 | struct ivpu_ipc_info; |
97 | struct ivpu_pm_info; |
98 | |
/* Main per-device state; embeds the DRM device (see to_ivpu_device()) */
struct ivpu_device {
	struct drm_device drm;			/* embedded DRM device */
	void __iomem *regb;			/* MMIO mapping (likely buttress regs — confirm against probe) */
	void __iomem *regv;			/* MMIO mapping (likely VPU regs — confirm against probe) */
	u32 platform;				/* one of IVPU_PLATFORM_*, see ivpu_get_platform() */
	u32 irq;

	struct ivpu_wa_table wa;		/* active workarounds, see IVPU_WA() */
	struct ivpu_hw_info *hw;
	struct ivpu_mmu_info *mmu;
	struct ivpu_fw_info *fw;
	struct ivpu_ipc_info *ipc;
	struct ivpu_pm_info *pm;

	struct ivpu_mmu_context gctx;		/* global MMU context (IVPU_GLOBAL_CONTEXT_MMU_SSID) */
	struct ivpu_mmu_context rctx;		/* reserved MMU context (IVPU_RESERVED_CONTEXT_MMU_SSID) */
	struct xarray context_xa;		/* user contexts; range bounded by context_xa_limit */
	struct xa_limit context_xa_limit;	/* valid ID range, see ivpu_get_context_count() */

	struct xarray submitted_jobs_xa;	/* jobs submitted to the VPU */
	struct task_struct *job_done_thread;

	atomic64_t unique_id_counter;

	/* timeouts; units and consumers defined elsewhere — confirm at init site */
	struct {
		int boot;
		int jsm;
		int tdr;
		int reschedule_suspend;
		int autosuspend;
	} timeout;
};
131 | |
132 | /* |
133 | * file_priv has its own refcount (ref) that allows user space to close the fd |
134 | * without blocking even if VPU is still processing some jobs. |
135 | */ |
/*
 * file_priv has its own refcount (ref) that allows user space to close the fd
 * without blocking even if VPU is still processing some jobs.
 */
struct ivpu_file_priv {
	struct kref ref;			/* see ivpu_file_priv_get()/ivpu_file_priv_put() */
	struct ivpu_device *vdev;		/* back-pointer to the owning device */
	struct mutex lock; /* Protects cmdq */
	struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];	/* one command queue per engine */
	struct ivpu_mmu_context ctx;		/* per-file MMU context */
	u32 priority;
	bool has_mmu_faults;			/* set when this context caused MMU faults */
};
145 | |
146 | extern int ivpu_dbg_mask; |
147 | extern u8 ivpu_pll_min_ratio; |
148 | extern u8 ivpu_pll_max_ratio; |
149 | extern bool ivpu_disable_mmu_cont_pages; |
150 | |
151 | #define IVPU_TEST_MODE_DISABLED 0 |
152 | #define IVPU_TEST_MODE_FW_TEST 1 |
153 | #define IVPU_TEST_MODE_NULL_HW 2 |
154 | extern int ivpu_test_mode; |
155 | |
156 | struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv); |
157 | struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id); |
158 | void ivpu_file_priv_put(struct ivpu_file_priv **link); |
159 | |
160 | int ivpu_boot(struct ivpu_device *vdev); |
161 | int ivpu_shutdown(struct ivpu_device *vdev); |
162 | void ivpu_prepare_for_reset(struct ivpu_device *vdev); |
163 | |
164 | static inline u8 ivpu_revision(struct ivpu_device *vdev) |
165 | { |
166 | return to_pci_dev(vdev->drm.dev)->revision; |
167 | } |
168 | |
169 | static inline u16 ivpu_device_id(struct ivpu_device *vdev) |
170 | { |
171 | return to_pci_dev(vdev->drm.dev)->device; |
172 | } |
173 | |
174 | static inline int ivpu_hw_gen(struct ivpu_device *vdev) |
175 | { |
176 | switch (ivpu_device_id(vdev)) { |
177 | case PCI_DEVICE_ID_MTL: |
178 | case PCI_DEVICE_ID_ARL: |
179 | return IVPU_HW_37XX; |
180 | case PCI_DEVICE_ID_LNL: |
181 | return IVPU_HW_40XX; |
182 | default: |
183 | ivpu_err(vdev, "Unknown VPU device\n" ); |
184 | return 0; |
185 | } |
186 | } |
187 | |
/* Convert an embedded drm_device pointer back to its containing ivpu_device */
static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
{
	return container_of(dev, struct ivpu_device, drm);
}
192 | |
193 | static inline u32 ivpu_get_context_count(struct ivpu_device *vdev) |
194 | { |
195 | struct xa_limit ctx_limit = vdev->context_xa_limit; |
196 | |
197 | return (ctx_limit.max - ctx_limit.min + 1); |
198 | } |
199 | |
200 | static inline u32 ivpu_get_platform(struct ivpu_device *vdev) |
201 | { |
202 | WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID); |
203 | return vdev->platform; |
204 | } |
205 | |
206 | static inline bool ivpu_is_silicon(struct ivpu_device *vdev) |
207 | { |
208 | return ivpu_get_platform(vdev) == IVPU_PLATFORM_SILICON; |
209 | } |
210 | |
211 | static inline bool ivpu_is_simics(struct ivpu_device *vdev) |
212 | { |
213 | return ivpu_get_platform(vdev) == IVPU_PLATFORM_SIMICS; |
214 | } |
215 | |
216 | static inline bool ivpu_is_fpga(struct ivpu_device *vdev) |
217 | { |
218 | return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA; |
219 | } |
220 | |
221 | #endif /* __IVPU_DRV_H__ */ |
222 | |