// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_reset.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "gt/intel_rps.h"
#include "intel_uc.h"

#include "i915_drv.h"
#include "i915_hwmon.h"

static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;

static void uc_expand_default_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	if (i915->params.enable_guc != -1)
		return;

	/* Don't enable GuC/HuC on pre-Gen12 */
	if (GRAPHICS_VER(i915) < 12) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Don't enable GuC/HuC on older Gen12 platforms */
	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Intermediate platforms are HuC authentication only */
	if (IS_ALDERLAKE_S(i915) && !IS_RAPTORLAKE_S(i915)) {
		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
		return;
	}

	/* Default: enable HuC authentication and GuC submission */
	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;

	/* XEHPSDV and PVC do not use HuC */
	if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
		i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}

/* Reset GuC providing us with fresh state for both GuC and HuC. */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET),
		"GuC status: 0x%x, MIA core expected to be in reset\n",
		guc_status);

	return ret;
}

static void __confirm_options(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct drm_i915_private *i915 = gt->i915;

	gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
	       i915->params.enable_guc,
	       str_yes_no(intel_uc_wants_guc(uc)),
	       str_yes_no(intel_uc_wants_guc_submission(uc)),
	       str_yes_no(intel_uc_wants_huc(uc)),
	       str_yes_no(intel_uc_wants_guc_slpc(uc)));

	if (i915->params.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_wants_guc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
		GEM_BUG_ON(intel_uc_wants_huc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
			i915->params.enable_guc, "GuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
			i915->params.enable_guc, "GuC submission is N/A");

	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
		gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
			i915->params.enable_guc, "undocumented flag");
}

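/**
 * intel_uc_init_early - early initialization of the uC state
 * @uc: the intel_uc structure
 *
 * Expand the default enable_guc modparam for the platform, initialize the
 * early state of the GuC, HuC and GSC uCs and, based on the resulting
 * options, select the uC ops used by the rest of the init sequence.
 */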
void intel_uc_init_early(struct intel_uc *uc)
{
	uc_expand_default_options(uc);

	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);
	intel_gsc_uc_init_early(&uc->gsc);

	__confirm_options(uc);

	if (intel_uc_wants_guc(uc))
		uc->ops = &uc_ops_on;
	else
		uc->ops = &uc_ops_off;
}

void intel_uc_init_late(struct intel_uc *uc)
{
	intel_guc_init_late(&uc->guc);
	intel_gsc_uc_load_start(&uc->gsc);
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

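/*
 * Called on driver removal: stop the uC hardware side, release the uC
 * objects and drop any GuC load-error log we may still be holding.
 */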
void intel_uc_driver_remove(struct intel_uc *uc)
{
	intel_uc_fini_hw(uc);
	intel_uc_fini(uc);
	__uc_free_load_err_log(uc);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with the GuC is turned off at this point, we can save
 * the message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
		guc->mmio_msg = 0;
	}
	spin_unlock_irq(&guc->irq_lock);
}

static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	intel_guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(gt->irq_lock);

	guc_dbg(guc, "communication enabled\n");

	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by the GuC
	 * in SCRATCH_15 via mmio. Make sure the register is clear before
	 * disabling CT since all events we cared about have already been
	 * processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	intel_guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	guc_dbg(guc, "communication disabled\n");
}

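/*
 * Firmware fetch order matters: GuC is fetched first and, if that fails,
 * the HuC and GSC fetches are skipped and their status is forced to ERROR
 * so that no firmware is left in a transient state.
 */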
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int err;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err) {
		/* Make sure we transition out of transient "SELECTED" state */
		if (intel_uc_wants_huc(uc)) {
			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err));
			intel_uc_fw_change_status(&uc->huc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		if (intel_uc_wants_gsc_uc(uc)) {
			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err));
			intel_uc_fw_change_status(&uc->gsc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		return;
	}

	if (intel_uc_wants_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);

	if (intel_uc_wants_gsc_uc(uc))
		intel_uc_fw_fetch(&uc->gsc.fw);
}

static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

static int __uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_uses_guc(uc))
		return 0;

	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
		return -ENOMEM;

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_uses_huc(uc))
		intel_huc_init(huc);

	if (intel_uc_uses_gsc_uc(uc))
		intel_gsc_uc_init(&uc->gsc);

	return 0;
}

static void __uc_fini(struct intel_uc *uc)
{
	intel_gsc_uc_fini(&uc->gsc);
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);
}

static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	gt_probe_error(gt, "Failed to init uC WOPCM registers!\n");
	gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
		       i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
		       intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
		       i915_mmio_reg_offset(GUC_WOPCM_SIZE),
		       intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

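/*
 * The WOPCM registers are write-once: GUC_WOPCM_SIZE_LOCKED and
 * GUC_WOPCM_OFFSET_VALID stay set until the next full reset, so finding
 * either bit set means GuC has already been loaded at least once.
 */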
static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

static int __uc_check_hw(struct intel_uc *uc)
{
	if (uc->fw_table_invalid)
		return -EIO;

	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before we look at WOPCM registers.
	 */
	if (uc_is_wopcm_locked(uc))
		return -EIO;

	return 0;
}

static void print_fw_ver(struct intel_gt *gt, struct intel_uc_fw *fw)
{
	gt_info(gt, "%s firmware %s version %u.%u.%u\n",
		intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
		fw->file_selected.ver.major,
		fw->file_selected.ver.minor,
		fw->file_selected.ver.patch);
}


static int __uc_init_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;
	bool pl1en = false;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	print_fw_ver(gt, &guc->fw);

	if (intel_uc_uses_huc(uc))
		print_fw_ver(gt, &huc->fw);

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	intel_guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (GRAPHICS_VER(i915) == 9)
		attempts = 3;
	else
		attempts = 1;

	/* Disable a potentially low PL1 power limit to allow freq to be raised */
	i915_hwmon_power_max_disable(gt->i915, &pl1en);

	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_rps;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		gt_dbg(gt, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
		       ERR_PTR(ret), attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	/*
	 * GSC-loaded HuC is authenticated by the GSC, so we don't need to
	 * trigger the auth here. However, given that a HuC loaded this way
	 * survives GT reset, we still need to update our SW bookkeeping to
	 * make sure it reflects the correct HW status.
	 */
	if (intel_huc_is_loaded_by_gsc(huc))
		intel_huc_update_auth_status(huc);
	else
		intel_huc_auth(huc, INTEL_HUC_AUTH_BY_GUC);

	if (intel_uc_uses_guc_submission(uc)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_log_capture;
	}

	if (intel_uc_uses_guc_slpc(uc)) {
		ret = intel_guc_slpc_enable(&guc->slpc);
		if (ret)
			goto err_submission;
	} else {
		/* Restore GT back to RPn for non-SLPC path */
		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
	}

	i915_hwmon_power_max_restore(gt->i915, pl1en);

	guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
	guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_submission:
	intel_guc_submission_disable(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_rps:
	/* Return GT back to RPn */
	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);

	i915_hwmon_power_max_restore(gt->i915, pl1en);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		gt_notice(gt, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret));

	/* We want to keep KMS alive */
	return -EIO;
}

static void __uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_fw_running(guc))
		return;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_disable(guc);

	__uc_sanitize(uc);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Preparing for full gpu reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	uc->reset_in_progress = true;

	/* Nothing to do if GuC isn't supported */
	if (!intel_uc_supports_guc(uc))
		return;

	/* Firmware expected to be running when this function is called */
	if (!intel_guc_is_ready(guc))
		goto sanitize;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_prepare(guc);

sanitize:
	__uc_sanitize(uc);
}

void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware cannot be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset(guc, stalled);
}

void intel_uc_reset_finish(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	/*
	 * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
	 * So this function is sometimes called with the in-progress flag not set.
	 */
	uc->reset_in_progress = false;

	/* Firmware expected to be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_finish(guc);
}

void intel_uc_cancel_requests(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware cannot be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_cancel_requests(guc);
}

void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	/*
	 * Wait for any outstanding CTB before tearing down communication
	 * with the GuC.
	 */
#define OUTSTANDING_CTB_TIMEOUT_PERIOD	(HZ / 5)
	intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
				       false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;
	int err;

	/* flush the GSC worker */
	intel_gsc_uc_flush_work(&uc->gsc);

	wake_up_all_tlb_invalidate(guc);

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	intel_guc_submission_flush_work(guc);

	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
		err = intel_guc_suspend(guc);
		if (err)
			guc_dbg(guc, "Failed to suspend, %pe", ERR_PTR(err));
	}
}

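/*
 * Restore the GGTT mappings of the GuC and HuC firmware images so that
 * they can be re-uploaded after resume.
 */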
static void __uc_resume_mappings(struct intel_uc *uc)
{
	intel_uc_fw_resume_mapping(&uc->guc.fw);
	intel_uc_fw_resume_mapping(&uc->huc.fw);
}

static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_gt *gt = guc_to_gt(guc);
	int err;

	if (!intel_guc_is_fw_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));

	if (enable_communication)
		guc_enable_communication(guc);

	/*
	 * If we are only resuming GuC communication but not reloading
	 * GuC, we need to ensure the ARAT timer interrupt is enabled
	 * again. In case of GuC reload, it is enabled during SLPC enable.
	 */
	if (enable_communication && intel_uc_uses_guc_slpc(uc))
		intel_guc_pm_intrmsk_enable(gt);

	err = intel_guc_resume(guc);
	if (err) {
		guc_dbg(guc, "Failed to resume, %pe", ERR_PTR(err));
		return err;
	}

	intel_gsc_uc_resume(&uc->gsc);

	if (intel_guc_tlb_invalidation_is_available(guc)) {
		intel_guc_invalidate_tlb_engines(guc);
		intel_guc_invalidate_tlb_guc(guc);
	}

	return 0;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}

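/*
 * uc_ops_off is selected in intel_uc_init_early() when GuC usage is not
 * wanted; its init_hw hook only runs __uc_check_hw() to refuse to continue
 * on hardware where GuC has already been enabled.
 */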
static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
	.fini = __uc_fini, /* to clean-up the init_early initialization */
};

static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,

	.resume_mappings = __uc_resume_mappings,
};