1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2020-2024 Intel Corporation |
4 | */ |
5 | |
6 | #include "ivpu_drv.h" |
7 | #include "ivpu_fw.h" |
8 | #include "ivpu_hw_37xx_reg.h" |
9 | #include "ivpu_hw_reg_io.h" |
10 | #include "ivpu_hw.h" |
11 | #include "ivpu_ipc.h" |
12 | #include "ivpu_mmu.h" |
13 | #include "ivpu_pm.h" |
14 | |
15 | #define TILE_FUSE_ENABLE_BOTH 0x0 |
16 | #define TILE_SKU_BOTH 0x3630 |
17 | |
18 | /* Work point configuration values */ |
19 | #define CONFIG_1_TILE 0x01 |
20 | #define CONFIG_2_TILE 0x02 |
21 | #define PLL_RATIO_5_3 0x01 |
22 | #define PLL_RATIO_4_3 0x02 |
23 | #define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio)) |
24 | #define WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3) |
25 | #define WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3) |
26 | #define WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3) |
27 | #define WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3) |
28 | #define WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0) |
29 | |
30 | #define PLL_REF_CLK_FREQ (50 * 1000000) |
31 | #define PLL_SIMULATION_FREQ (10 * 1000000) |
32 | #define PLL_PROF_CLK_FREQ (38400 * 1000) |
33 | #define PLL_DEFAULT_EPP_VALUE 0x80 |
34 | |
35 | #define TIM_SAFE_ENABLE 0xf1d0dead |
36 | #define TIM_WATCHDOG_RESET_VALUE 0xffffffff |
37 | |
38 | #define TIMEOUT_US (150 * USEC_PER_MSEC) |
39 | #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) |
40 | #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) |
41 | #define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) |
42 | |
43 | #define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ |
44 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ |
45 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ |
46 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ |
47 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ |
48 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ |
49 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) |
50 | |
51 | #define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ |
52 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ |
53 | (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) |
54 | |
55 | #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) |
56 | |
57 | #define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ |
58 | (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR))) |
59 | |
60 | #define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \ |
61 | (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE))) |
62 | |
63 | #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) |
64 | #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) |
65 | |
66 | #define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ |
67 | (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ |
68 | (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ |
69 | (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ |
70 | (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ |
71 | (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ |
72 | (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) |
73 | |
/*
 * Detect and record hardware workarounds applicable to this device.
 *
 * Probes the buttress interrupt-status clearing semantics: write all 1s and
 * read back - if the bits remain set, this part clears status on write-0
 * instead, and the interrupt_clear_with_0 workaround is enabled.
 */
static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
	vdev->wa.punit_disabled = false;
	vdev->wa.clear_runtime_mem = false;

	REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
	if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
		/* Writing 1s does not clear the interrupt status register */
		vdev->wa.interrupt_clear_with_0 = true;
		/* Clear any status left behind by the probe write above */
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
	}

	IVPU_PRINT_WA(punit_disabled);
	IVPU_PRINT_WA(clear_runtime_mem);
	IVPU_PRINT_WA(interrupt_clear_with_0);
}
90 | |
/*
 * Initialize driver/firmware interaction timeouts.
 * NOTE(review): units are presumably milliseconds - confirm against the
 * consumers of vdev->timeout before relying on this.
 */
static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
	vdev->timeout.boot = 1000;
	vdev->timeout.jsm = 500;
	vdev->timeout.tdr = 2000;
	vdev->timeout.reschedule_suspend = 10;
	vdev->timeout.autosuspend = 10;
	vdev->timeout.d0i3_entry_msg = 5;
}
100 | |
/* Wait until the previous workpoint request SEND bit self-clears. */
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
105 | |
106 | /* Send KMD initiated workpoint change */ |
107 | static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio, |
108 | u16 target_ratio, u16 config) |
109 | { |
110 | int ret; |
111 | u32 val; |
112 | |
113 | ret = ivpu_pll_wait_for_cmd_send(vdev); |
114 | if (ret) { |
115 | ivpu_err(vdev, "Failed to sync before WP request: %d\n" , ret); |
116 | return ret; |
117 | } |
118 | |
119 | val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0); |
120 | val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val); |
121 | val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val); |
122 | REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val); |
123 | |
124 | val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1); |
125 | val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val); |
126 | val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val); |
127 | REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val); |
128 | |
129 | val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2); |
130 | val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val); |
131 | REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val); |
132 | |
133 | val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD); |
134 | val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val); |
135 | REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val); |
136 | |
137 | ret = ivpu_pll_wait_for_cmd_send(vdev); |
138 | if (ret) |
139 | ivpu_err(vdev, "Failed to sync after WP request: %d\n" , ret); |
140 | |
141 | return ret; |
142 | } |
143 | |
144 | static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable) |
145 | { |
146 | u32 exp_val = enable ? 0x1 : 0x0; |
147 | |
148 | if (IVPU_WA(punit_disabled)) |
149 | return 0; |
150 | |
151 | return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); |
152 | } |
153 | |
/* Poll VPU_STATUS until READY is set; no-op when the punit is disabled. */
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
161 | |
/*
 * Read the fused PLL frequency ratio limits and derive the operating range.
 *
 * The module parameters ivpu_pll_min_ratio/ivpu_pll_max_ratio are clamped
 * into the fused [min, max] window, and the nominal (PN) ratio is clamped
 * into the resulting [min_ratio, max_ratio] range.
 */
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
	u32 fmin_fuse, fmax_fuse;

	fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
	fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
	fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);

	hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
	hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
	hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
179 | |
/* Poll until the NPU IP MMIO bar is accessible (AON reset bit reads 0). */
static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
184 | |
185 | static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) |
186 | { |
187 | struct ivpu_hw_info *hw = vdev->hw; |
188 | u16 target_ratio; |
189 | u16 config; |
190 | int ret; |
191 | |
192 | if (IVPU_WA(punit_disabled)) { |
193 | ivpu_dbg(vdev, PM, "Skipping PLL request\n" ); |
194 | return 0; |
195 | } |
196 | |
197 | if (enable) { |
198 | target_ratio = hw->pll.pn_ratio; |
199 | config = hw->config; |
200 | } else { |
201 | target_ratio = 0; |
202 | config = 0; |
203 | } |
204 | |
205 | ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n" , |
206 | config, target_ratio); |
207 | |
208 | ret = ivpu_pll_cmd_send(vdev, min_ratio: hw->pll.min_ratio, max_ratio: hw->pll.max_ratio, target_ratio, config); |
209 | if (ret) { |
210 | ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n" , ret); |
211 | return ret; |
212 | } |
213 | |
214 | ret = ivpu_pll_wait_for_lock(vdev, enable); |
215 | if (ret) { |
216 | ivpu_err(vdev, "Timed out waiting for PLL lock\n" ); |
217 | return ret; |
218 | } |
219 | |
220 | if (enable) { |
221 | ret = ivpu_pll_wait_for_status_ready(vdev); |
222 | if (ret) { |
223 | ivpu_err(vdev, "Timed out waiting for PLL ready status\n" ); |
224 | return ret; |
225 | } |
226 | |
227 | ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev); |
228 | if (ret) { |
229 | ivpu_err(vdev, "Timed out waiting for NPU IP bar\n" ); |
230 | return ret; |
231 | } |
232 | } |
233 | |
234 | return 0; |
235 | } |
236 | |
237 | static int ivpu_pll_enable(struct ivpu_device *vdev) |
238 | { |
239 | return ivpu_pll_drive(vdev, enable: true); |
240 | } |
241 | |
242 | static int ivpu_pll_disable(struct ivpu_device *vdev) |
243 | { |
244 | return ivpu_pll_drive(vdev, enable: false); |
245 | } |
246 | |
/* Clear (de-assert) the TOP_NOC/DSS_MAS/MSS_MAS resets via the RST_CLR register. */
static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
	u32 val = 0;

	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
257 | |
/*
 * Assert (enable=true) or release (enable=false) the TOP_NOC/DSS_MAS/MSS_MAS
 * reset bits via a read-modify-write of the RST_SET register.
 */
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
274 | |
/*
 * Gate or ungate the TOP_NOC/DSS_MAS/MSS_MAS clocks via a read-modify-write
 * of the CLK_SET register.
 */
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
291 | |
292 | static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) |
293 | { |
294 | u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN); |
295 | |
296 | if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) |
297 | return -EIO; |
298 | |
299 | return 0; |
300 | } |
301 | |
302 | static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) |
303 | { |
304 | u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN); |
305 | |
306 | if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) |
307 | return -EIO; |
308 | |
309 | return 0; |
310 | } |
311 | |
312 | static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) |
313 | { |
314 | u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY); |
315 | |
316 | if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val)) |
317 | return -EIO; |
318 | |
319 | return 0; |
320 | } |
321 | |
322 | static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val) |
323 | { |
324 | u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN); |
325 | |
326 | if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || |
327 | !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val)) |
328 | return -EIO; |
329 | |
330 | return 0; |
331 | } |
332 | |
333 | static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) |
334 | { |
335 | u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN); |
336 | |
337 | if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || |
338 | !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) |
339 | return -EIO; |
340 | |
341 | return 0; |
342 | } |
343 | |
344 | static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) |
345 | { |
346 | u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY); |
347 | |
348 | if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || |
349 | !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) |
350 | return -EIO; |
351 | |
352 | return 0; |
353 | } |
354 | |
/*
 * Configure the host subsystem: release the CPR resets and confirm the
 * NOC QREQN field reads back 0.
 */
static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
{
	ivpu_boot_host_ss_rst_clr_assert(vdev);

	return ivpu_boot_noc_qreqn_check(vdev, 0x0);
}
361 | |
/* Disable the vpu_idle indication generation logic (write 0 to IDLE_GEN). */
static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}
366 | |
367 | static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable) |
368 | { |
369 | int ret; |
370 | u32 val; |
371 | |
372 | val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN); |
373 | if (enable) |
374 | val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); |
375 | else |
376 | val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); |
377 | REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val); |
378 | |
379 | ret = ivpu_boot_noc_qacceptn_check(vdev, exp_val: enable ? 0x1 : 0x0); |
380 | if (ret) { |
381 | ivpu_err(vdev, "Failed qacceptn check: %d\n" , ret); |
382 | return ret; |
383 | } |
384 | |
385 | ret = ivpu_boot_noc_qdeny_check(vdev, exp_val: 0x0); |
386 | if (ret) |
387 | ivpu_err(vdev, "Failed qdeny check: %d\n" , ret); |
388 | |
389 | return ret; |
390 | } |
391 | |
392 | static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev) |
393 | { |
394 | return ivpu_boot_host_ss_axi_drive(vdev, enable: true); |
395 | } |
396 | |
397 | static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable) |
398 | { |
399 | int ret; |
400 | u32 val; |
401 | |
402 | val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN); |
403 | if (enable) { |
404 | val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val); |
405 | val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); |
406 | } else { |
407 | val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val); |
408 | val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); |
409 | } |
410 | REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val); |
411 | |
412 | ret = ivpu_boot_top_noc_qacceptn_check(vdev, exp_val: enable ? 0x1 : 0x0); |
413 | if (ret) { |
414 | ivpu_err(vdev, "Failed qacceptn check: %d\n" , ret); |
415 | return ret; |
416 | } |
417 | |
418 | ret = ivpu_boot_top_noc_qdeny_check(vdev, exp_val: 0x0); |
419 | if (ret) |
420 | ivpu_err(vdev, "Failed qdeny check: %d\n" , ret); |
421 | |
422 | return ret; |
423 | } |
424 | |
425 | static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev) |
426 | { |
427 | return ivpu_boot_host_ss_top_noc_drive(vdev, enable: true); |
428 | } |
429 | |
/* Enable/disable the MSS_CPU power island trickle charge (read-modify-write). */
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
441 | |
/* Enable/disable the MSS_CPU power island (read-modify-write of EN0). */
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
453 | |
/* Poll the MSS_CPU power island status until it reads @exp_val or times out. */
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
			     exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
459 | |
/* Enable/disable MSS_CPU power island isolation (read-modify-write of ISO_EN0). */
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
471 | |
/* Set/clear the DPU_ACTIVE bit (read-modify-write). */
static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
483 | |
484 | static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) |
485 | { |
486 | int ret; |
487 | |
488 | ivpu_boot_pwr_island_trickle_drive(vdev, enable: true); |
489 | ivpu_boot_pwr_island_drive(vdev, enable: true); |
490 | |
491 | ret = ivpu_boot_wait_for_pwr_island_status(vdev, exp_val: 0x1); |
492 | if (ret) { |
493 | ivpu_err(vdev, "Timed out waiting for power island status\n" ); |
494 | return ret; |
495 | } |
496 | |
497 | ret = ivpu_boot_top_noc_qrenqn_check(vdev, exp_val: 0x0); |
498 | if (ret) { |
499 | ivpu_err(vdev, "Failed qrenqn check %d\n" , ret); |
500 | return ret; |
501 | } |
502 | |
503 | ivpu_boot_host_ss_clk_drive(vdev, enable: true); |
504 | ivpu_boot_pwr_island_isolation_drive(vdev, enable: false); |
505 | ivpu_boot_host_ss_rst_drive(vdev, enable: true); |
506 | ivpu_boot_dpu_active_drive(vdev, enable: true); |
507 | |
508 | return ret; |
509 | } |
510 | |
/*
 * Configure TCU page-table-walk snoop overrides: enable the override,
 * with no-snoop cleared for writes (AW) but set for reads (AR).
 */
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
	val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
521 | |
/* Mark TBU0 and TBU2 read/write MMU stream IDs as valid (TBU1/TBU3 untouched). */
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
533 | |
534 | static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev) |
535 | { |
536 | u32 val; |
537 | |
538 | val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC); |
539 | val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val); |
540 | |
541 | val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val); |
542 | REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); |
543 | |
544 | val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); |
545 | REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); |
546 | |
547 | val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); |
548 | REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); |
549 | |
550 | val = vdev->fw->entry_point >> 9; |
551 | REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val); |
552 | |
553 | val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val); |
554 | REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val); |
555 | |
556 | ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n" , |
557 | vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume" ); |
558 | } |
559 | |
560 | static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable) |
561 | { |
562 | int ret; |
563 | u32 val; |
564 | |
565 | ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); |
566 | if (ret) { |
567 | ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n" , ret); |
568 | return ret; |
569 | } |
570 | |
571 | val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL); |
572 | if (enable) |
573 | val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val); |
574 | else |
575 | val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val); |
576 | REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val); |
577 | |
578 | ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); |
579 | if (ret) |
580 | ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n" , ret); |
581 | |
582 | return ret; |
583 | } |
584 | |
585 | static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev) |
586 | { |
587 | struct ivpu_hw_info *hw = vdev->hw; |
588 | |
589 | hw->tile_fuse = TILE_FUSE_ENABLE_BOTH; |
590 | hw->sku = TILE_SKU_BOTH; |
591 | hw->config = WP_CONFIG_2_TILE_4_3_RATIO; |
592 | |
593 | ivpu_pll_init_frequency_ratios(vdev); |
594 | |
595 | ivpu_hw_init_range(range: &hw->ranges.global, start: 0x80000000, SZ_512M); |
596 | ivpu_hw_init_range(range: &hw->ranges.user, start: 0xc0000000, size: 255 * SZ_1M); |
597 | ivpu_hw_init_range(range: &hw->ranges.shave, start: 0x180000000, SZ_2G); |
598 | ivpu_hw_init_range(range: &hw->ranges.dma, start: 0x200000000, SZ_8G); |
599 | |
600 | vdev->platform = IVPU_PLATFORM_SILICON; |
601 | ivpu_hw_wa_init(vdev); |
602 | ivpu_hw_timeouts_init(vdev); |
603 | |
604 | return 0; |
605 | } |
606 | |
607 | static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev) |
608 | { |
609 | int ret; |
610 | u32 val; |
611 | |
612 | if (IVPU_WA(punit_disabled)) |
613 | return 0; |
614 | |
615 | ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); |
616 | if (ret) { |
617 | ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n" ); |
618 | return ret; |
619 | } |
620 | |
621 | val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET); |
622 | val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val); |
623 | REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val); |
624 | |
625 | ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); |
626 | if (ret) |
627 | ivpu_err(vdev, "Timed out waiting for RESET completion\n" ); |
628 | |
629 | return ret; |
630 | } |
631 | |
632 | static int ivpu_hw_37xx_reset(struct ivpu_device *vdev) |
633 | { |
634 | int ret = 0; |
635 | |
636 | if (ivpu_hw_37xx_ip_reset(vdev)) { |
637 | ivpu_err(vdev, "Failed to reset NPU\n" ); |
638 | ret = -EIO; |
639 | } |
640 | |
641 | if (ivpu_pll_disable(vdev)) { |
642 | ivpu_err(vdev, "Failed to disable PLL\n" ); |
643 | ret = -EIO; |
644 | } |
645 | |
646 | return ret; |
647 | } |
648 | |
649 | static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev) |
650 | { |
651 | int ret; |
652 | |
653 | ret = ivpu_boot_d0i3_drive(vdev, enable: true); |
654 | if (ret) |
655 | ivpu_err(vdev, "Failed to enable D0i3: %d\n" , ret); |
656 | |
657 | udelay(5); /* VPU requires 5 us to complete the transition */ |
658 | |
659 | return ret; |
660 | } |
661 | |
662 | static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev) |
663 | { |
664 | int ret; |
665 | |
666 | ret = ivpu_boot_d0i3_drive(vdev, enable: false); |
667 | if (ret) |
668 | ivpu_err(vdev, "Failed to disable D0i3: %d\n" , ret); |
669 | |
670 | return ret; |
671 | } |
672 | |
/*
 * Power up the NPU to the point where firmware can be booted:
 * WP 0 (PLL off), D0i3 exit, PLL enable, host SS configuration,
 * power domain, AXI and TOP NOC enable.
 *
 * The first two steps only warn on failure - they are best-effort cleanup
 * of a possibly dirty previous power-down.
 *
 * Return: 0 on success, negative errno on the first hard failure.
 */
static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
	int ret;

	/* PLL requests may fail when powering down, so issue WP 0 here */
	ret = ivpu_pll_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);

	ret = ivpu_hw_37xx_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = ivpu_pll_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	/*
	 * The control circuitry for vpu_idle indication logic powers up active.
	 * To ensure unnecessary low power mode signal from LRT during bring up,
	 * KMD disables the circuitry prior to bringing up the Main Power island.
	 */
	ivpu_boot_vpu_idle_gen_disable(vdev);

	ret = ivpu_boot_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}
723 | |
/*
 * Boot the firmware: configure no-snoop overrides and TBU MMU stream IDs,
 * then start the SoC CPU.
 */
static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
	ivpu_boot_no_snoop_enable(vdev);
	ivpu_boot_tbu_mmu_enable(vdev);
	ivpu_boot_soc_cpu_boot(vdev);

	return 0;
}
732 | |
733 | static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev) |
734 | { |
735 | u32 val; |
736 | |
737 | if (IVPU_WA(punit_disabled)) |
738 | return true; |
739 | |
740 | val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS); |
741 | return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) && |
742 | REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val); |
743 | } |
744 | |
/* Poll VPU_STATUS until the IDLE bit is set or IDLE_TIMEOUT_US elapses. */
static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev)
{
	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}
749 | |
/* Record matching host (boottime) and VPU (free-running counter) timestamps at D0i3 entry. */
static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
}
755 | |
756 | static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev) |
757 | { |
758 | int ret = 0; |
759 | |
760 | ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev); |
761 | |
762 | if (!ivpu_hw_37xx_is_idle(vdev)) |
763 | ivpu_warn(vdev, "NPU not idle during power down\n" ); |
764 | |
765 | if (ivpu_hw_37xx_reset(vdev)) { |
766 | ivpu_err(vdev, "Failed to reset NPU\n" ); |
767 | ret = -EIO; |
768 | } |
769 | |
770 | if (ivpu_hw_37xx_d0i3_enable(vdev)) { |
771 | ivpu_err(vdev, "Failed to enter D0I3\n" ); |
772 | ret = -EIO; |
773 | } |
774 | |
775 | return ret; |
776 | } |
777 | |
/*
 * Disable the MSS watchdog timer. Each write to the timer registers must
 * be preceded by the TIM_SAFE_ENABLE unlock magic.
 */
static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
795 | |
/* Return the fixed profiling clock frequency (not configurable on 37xx). */
static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev)
{
	return PLL_PROF_CLK_FREQ;
}
800 | |
/* Intentionally empty callback - kept so the HW ops table stays uniform. */
static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
805 | |
806 | static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) |
807 | { |
808 | u32 pll_clock = PLL_REF_CLK_FREQ * ratio; |
809 | u32 cpu_clock; |
810 | |
811 | if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3) |
812 | cpu_clock = pll_clock * 2 / 4; |
813 | else |
814 | cpu_clock = pll_clock * 2 / 5; |
815 | |
816 | return cpu_clock; |
817 | } |
818 | |
819 | /* Register indirect accesses */ |
820 | static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev) |
821 | { |
822 | u32 pll_curr_ratio; |
823 | |
824 | pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL); |
825 | pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK; |
826 | |
827 | if (!ivpu_is_silicon(vdev)) |
828 | return PLL_SIMULATION_FREQ; |
829 | |
830 | return ivpu_hw_37xx_ratio_to_freq(vdev, ratio: pll_curr_ratio); |
831 | } |
832 | |
/* Read the firmware telemetry buffer offset from the buttress. */
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
837 | |
/* Read the firmware telemetry buffer size from the buttress. */
static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
842 | |
/* Read the firmware telemetry enable flag from the buttress. */
static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
847 | |
/*
 * Ring doorbell @db_id: write the SET bit to the db_id-th doorbell register,
 * located at DOORBELL_0 plus db_id times the register stride.
 */
static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
855 | |
/* Pop the next IPC message address from the RX FIFO (ATM register read). */
static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
	return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
860 | |
/*
 * Return the number of pending messages in the IPC RX FIFO.
 * Uses the SILENT read variant (presumably to avoid trace noise on
 * frequent polling - confirm against REGV_RD32_SILENT's definition).
 */
static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
	u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);

	return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
867 | |
/* Push an IPC message address into the TX FIFO. */
static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
872 | |
/* Clear all ICB 0/1 interrupt sources with a single 64-bit write. */
static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
	REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
877 | |
/*
 * Enable interrupts: firewall sources, ICB 0/1 sources, buttress local
 * mask, and finally unmask the global interrupt (write 0) last.
 */
static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
885 | |
/*
 * Disable interrupts in the reverse order of ivpu_hw_37xx_irq_enable():
 * global mask first (write 1), then local mask, ICB sources and firewall.
 */
static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}
893 | |
/*
 * NCE watchdog expired - the firmware is stuck; schedule device recovery.
 * Note: "reason:"-style argument labels are not valid C (they were editor
 * inlay-hint artifacts) and have been removed.
 */
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
898 | |
/*
 * MSS watchdog expired - disable the watchdog so it does not re-fire,
 * then schedule device recovery. (Removed invalid "reason:" argument
 * label, an editor inlay-hint artifact, to restore valid C.)
 */
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
904 | |
/*
 * NOC firewall violation - an illegal access was blocked; schedule device
 * recovery. (Removed invalid "reason:" argument label, an editor inlay-hint
 * artifact, to restore valid C.)
 */
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
909 | |
/*
 * Handler for IRQs from VPU core (irqV).
 *
 * Reads the masked ICB status, acknowledges the observed bits at the source
 * before dispatching (so a new event arriving during dispatch re-latches),
 * then calls the per-source handlers. Sets *wake_thread when the IPC handler
 * requests threaded processing. Returns true if any IRQ was handled.
 */
static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;

	if (!status)
		return false;

	/* Ack only the bits we are about to service */
	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev, wake_thread);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n" );

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		ivpu_hw_37xx_irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		ivpu_hw_37xx_irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		ivpu_hw_37xx_irq_noc_firewall_handler(vdev);

	return true;
}
943 | |
944 | /* Handler for IRQs from Buttress core (irqB) */ |
945 | static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq) |
946 | { |
947 | u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; |
948 | bool schedule_recovery = false; |
949 | |
950 | if (!status) |
951 | return false; |
952 | |
953 | if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) |
954 | ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x" , |
955 | REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL)); |
956 | |
957 | if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) { |
958 | ivpu_err(vdev, "ATS_ERR irq 0x%016llx" , REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0)); |
959 | REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1); |
960 | schedule_recovery = true; |
961 | } |
962 | |
963 | if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) { |
964 | u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG); |
965 | |
966 | ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx" , |
967 | ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log), |
968 | REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log), |
969 | REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log)); |
970 | REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1); |
971 | schedule_recovery = true; |
972 | } |
973 | |
974 | /* This must be done after interrupts are cleared at the source. */ |
975 | if (IVPU_WA(interrupt_clear_with_0)) |
976 | /* |
977 | * Writing 1 triggers an interrupt, so we can't perform read update write. |
978 | * Clear local interrupt status by writing 0 to all bits. |
979 | */ |
980 | REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0); |
981 | else |
982 | REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status); |
983 | |
984 | if (schedule_recovery) |
985 | ivpu_pm_trigger_recovery(vdev, reason: "Buttress IRQ" ); |
986 | |
987 | return true; |
988 | } |
989 | |
990 | static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr) |
991 | { |
992 | struct ivpu_device *vdev = ptr; |
993 | bool irqv_handled, irqb_handled, wake_thread = false; |
994 | |
995 | REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); |
996 | |
997 | irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, wake_thread: &wake_thread); |
998 | irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq); |
999 | |
1000 | /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ |
1001 | REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); |
1002 | |
1003 | if (wake_thread) |
1004 | return IRQ_WAKE_THREAD; |
1005 | if (irqv_handled || irqb_handled) |
1006 | return IRQ_HANDLED; |
1007 | return IRQ_NONE; |
1008 | } |
1009 | |
/*
 * Dump likely failure causes after a timeout/hang: pending-but-unserviced
 * ICB and buttress interrupt bits, and a non-empty IPC RX FIFO. Read-only
 * diagnostics; nothing is cleared here.
 */
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
	u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
	u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

	if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ" );

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
		ivpu_err(vdev, "WDT MSS timeout detected\n" );

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
		ivpu_err(vdev, "WDT NCE timeout detected\n" );

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
		ivpu_err(vdev, "NOC Firewall irq detected\n" );

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx" , REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx" ,
			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
	}
}
1039 | |
/*
 * 37xx-generation hardware ops vtable consumed by the generic ivpu_hw layer.
 * Several callbacks (info_init, power_up, reset, ...) are defined earlier in
 * this file, outside this excerpt.
 */
const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
	.info_init = ivpu_hw_37xx_info_init,
	.power_up = ivpu_hw_37xx_power_up,
	.is_idle = ivpu_hw_37xx_is_idle,
	.wait_for_idle = ivpu_hw_37xx_wait_for_idle,
	.power_down = ivpu_hw_37xx_power_down,
	.reset = ivpu_hw_37xx_reset,
	.boot_fw = ivpu_hw_37xx_boot_fw,
	.wdt_disable = ivpu_hw_37xx_wdt_disable,
	.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
	.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
	.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
	.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
	.ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
	.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
	.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
	.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
	.reg_db_set = ivpu_hw_37xx_reg_db_set,
	.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
	.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
	.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
	.irq_clear = ivpu_hw_37xx_irq_clear,
	.irq_enable = ivpu_hw_37xx_irq_enable,
	.irq_disable = ivpu_hw_37xx_irq_disable,
	.irq_handler = ivpu_hw_37xx_irq_handler,
};
1066 | |