// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */
#include <linux/workqueue.h>

#include "gem/i915_gem_context.h"

#include "gt/intel_context.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"

#include "intel_pxp.h"
#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_regs.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"
/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
 * It allows execution and flip to display of protected (i.e. encrypted)
 * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
 *
 * Objects can opt-in to PXP encryption at creation time via the
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
 * correctly protected they must be used in conjunction with a context created
 * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
 * of those two uapi flags for details and restrictions.
 *
 * Protected objects are tied to a pxp session; currently we only support one
 * session, which i915 manages and whose index is available in the uapi
 * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
 * protected objects.
 * The session is invalidated by the HW when certain events occur (e.g.
 * suspend/resume). When this happens, all the objects that were used with the
 * session are marked as invalid and all contexts marked as using protected
 * content are banned. Any further attempt at using them in an execbuf call is
 * rejected, while flips are converted to black frames.
 *
 * Some of the PXP setup operations are performed by the Management Engine,
 * which is handled by the mei driver; communication between i915 and mei is
 * performed via the mei_pxp component module.
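 *
 * As an illustrative sketch only (the authoritative definitions live in the
 * uapi headers), userspace might opt an object into PXP at creation time
 * roughly as follows, where drm_fd and obj_size are placeholders::
 *
 *	struct drm_i915_gem_create_ext_protected_content pc = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = obj_size,
 *		.extensions = (uintptr_t)&pc,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
 *
 * The resulting handle must then be used with a context created with the
 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag set.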
 */

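/**
 * intel_pxp_is_supported - check if PXP is supported on this device
 * @pxp: the pxp module handle, may be NULL
 *
 * Returns true if the kernel was built with CONFIG_DRM_I915_PXP and a pxp
 * module was allocated for this device.
 */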
bool intel_pxp_is_supported(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp;
}

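/**
 * intel_pxp_is_enabled - check if PXP is enabled
 * @pxp: the pxp module handle, may be NULL
 *
 * Returns true if PXP is supported and the dedicated VCS context used for
 * PXP submissions has been created (see create_vcs_context()).
 */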
bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
}

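/**
 * intel_pxp_is_active - check if PXP is active
 * @pxp: the pxp module handle, may be NULL
 *
 * Returns true if PXP is enabled and the arbitration session is currently
 * valid, i.e. protected objects can be used right now.
 */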
bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
}

static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);

	intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
}

static void kcr_pxp_enable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, true);
}

static void kcr_pxp_disable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, false);
}

static int create_vcs_context(struct intel_pxp *pxp)
{
	static struct lock_class_key pxp_lock;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int i;

	/*
	 * Find the first VCS engine present. We're guaranteed there is one
	 * if we're in this function due to the check in has_pxp.
	 */
	for (i = 0, engine = NULL; !engine; i++)
		engine = gt->engine_class[VIDEO_DECODE_CLASS][i];

	GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);

	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						I915_GEM_HWS_PXP_ADDR,
						&pxp_lock, "pxp_context");
	if (IS_ERR(ce)) {
		drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
		return PTR_ERR(ce);
	}

	pxp->ce = ce;

	return 0;
}

static void destroy_vcs_context(struct intel_pxp *pxp)
{
	if (pxp->ce)
		intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
}

static void pxp_init_full(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;
	int ret;

	/*
	 * we'll use the completion to check if there is a termination pending,
	 * so we start it as completed and we reinit it when a termination
	 * is triggered.
	 */
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);

	if (pxp->ctrl_gt->type == GT_MEDIA)
		pxp->kcr_base = MTL_KCR_BASE;
	else
		pxp->kcr_base = GEN12_KCR_BASE;

	intel_pxp_session_management_init(pxp);

	ret = create_vcs_context(pxp);
	if (ret)
		return;

	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		ret = intel_pxp_gsccs_init(pxp);
	else
		ret = intel_pxp_tee_component_init(pxp);
	if (ret)
		goto out_context;

	drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");

	return;

out_context:
	destroy_vcs_context(pxp);
}

static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
{
	/*
	 * NOTE: Only certain platforms require PXP-tee-backend dependencies
	 * for HuC authentication. For now, it's limited to DG2.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
	    intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
		return to_gt(i915);

	return NULL;
}

static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
		return NULL;

	/*
	 * From MTL onwards, the PXP-controller-GT needs to have a valid GSC
	 * engine on the media GT. NOTE: if we have a media-tile with a
	 * GSC-engine, the VDBOX is already present, so skip that check. We
	 * also have to ensure the GSC and HuC firmware are coming online.
	 */
	if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
		return i915->media_gt;

	/*
	 * Otherwise we rely on the mei-pxp module, but only on legacy
	 * platforms that predate separate media GTs and have a valid VDBOX.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
		return to_gt(i915);

	return NULL;
}

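/**
 * intel_pxp_init - allocate and initialize the PXP module for a device
 * @i915: the i915 device instance
 *
 * Picks the GT that will own the PXP backend (either for full-featured
 * protected-content support or just for the tee channel used by HuC loading)
 * and initializes the corresponding backend.
 *
 * Returns 0 on success, -ENOTCONN if the GPU is wedged, -ENODEV if no
 * suitable GT is found, or -ENOMEM on allocation failure.
 */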
int intel_pxp_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	bool is_full_feature = false;

	if (intel_gt_is_wedged(to_gt(i915)))
		return -ENOTCONN;

	/*
	 * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
	 * we still need it if PXP's backend tee transport is needed.
	 */
	gt = find_gt_for_required_protected_content(i915);
	if (gt)
		is_full_feature = true;
	else
		gt = find_gt_for_required_teelink(i915);

	if (!gt)
		return -ENODEV;

	/*
	 * At this point, we will either enable full featured PXP capabilities
	 * including session and object management, or we will init the backend
	 * tee channel for internal users such as HuC loading by the GSC.
	 */
	i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
	if (!i915->pxp)
		return -ENOMEM;

	/* init common info used by all feature-mode usages */
	i915->pxp->ctrl_gt = gt;
	mutex_init(&i915->pxp->tee_mutex);

	/*
	 * If the full PXP feature is not available, but HuC is loaded by the
	 * GSC on a pre-MTL platform such as DG2, we can skip the init of the
	 * full PXP session/object management and just init the tee channel.
	 */
	if (is_full_feature)
		pxp_init_full(i915->pxp);
	else
		intel_pxp_tee_component_init(i915->pxp);

	return 0;
}

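/**
 * intel_pxp_fini - tear down the PXP module for a device
 * @i915: the i915 device instance
 *
 * Invalidates the arb session, shuts down the backend (gsccs or tee) and
 * frees the pxp allocation.
 */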
void intel_pxp_fini(struct drm_i915_private *i915)
{
	if (!i915->pxp)
		return;

	i915->pxp->arb_is_valid = false;

	if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
		intel_pxp_gsccs_fini(i915->pxp);
	else
		intel_pxp_tee_component_fini(i915->pxp);

	destroy_vcs_context(i915->pxp);

	kfree(i915->pxp);
	i915->pxp = NULL;
}

void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
{
	pxp->arb_is_valid = false;
	reinit_completion(&pxp->termination);
}

static void pxp_queue_termination(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;

	/*
	 * We want to get the same effect as if we received a termination
	 * interrupt, so just pretend that we did.
	 */
	spin_lock_irq(gt->irq_lock);
	intel_pxp_mark_termination_in_progress(pxp);
	pxp->session_events |= PXP_TERMINATION_REQUEST;
	queue_work(system_unbound_wq, &pxp->session_work);
	spin_unlock_irq(gt->irq_lock);
}

static bool pxp_component_bound(struct intel_pxp *pxp)
{
	bool bound = false;

	mutex_lock(&pxp->tee_mutex);
	if (pxp->pxp_component)
		bound = true;
	mutex_unlock(&pxp->tee_mutex);

	return bound;
}

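/*
 * The GSC-CS firmware backend defines its own worst-case round-trip latency,
 * while the mei-based tee backend uses a fixed 250 ms budget.
 */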
int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return GSCFW_MAX_ROUND_TRIP_LATENCY_MS;
	else
		return 250;
}

static int __pxp_global_teardown_final(struct intel_pxp *pxp)
{
	int timeout;

	if (!pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
	/*
	 * To ensure synchronous and coherent session teardown completion
	 * in response to suspend or shutdown triggers, don't use a worker.
	 */
	intel_pxp_mark_termination_in_progress(pxp);
	intel_pxp_terminate(pxp, false);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;

	return 0;
}

static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
{
	int timeout;

	if (pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
	/*
	 * The arb-session is currently inactive and we are doing a reset and restart
	 * due to a runtime event. Use the worker that was designed for this.
	 */
	pxp_queue_termination(pxp);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
			timeout);
		return -ETIMEDOUT;
	}

	return 0;
}

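/**
 * intel_pxp_end - synchronously tear down the PXP arb session
 * @pxp: the pxp module handle
 *
 * Used on the suspend/fini paths, where the termination must complete
 * before we proceed, so the teardown is performed inline rather than via
 * the session worker.
 */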
void intel_pxp_end(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	intel_wakeref_t wakeref;

	if (!intel_pxp_is_enabled(pxp))
		return;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_lock(&pxp->arb_mutex);

	if (__pxp_global_teardown_final(pxp))
		drm_dbg(&i915->drm, "PXP end timed out\n");

	mutex_unlock(&pxp->arb_mutex);

	intel_pxp_fini_hw(pxp);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static bool pxp_required_fw_failed(struct intel_pxp *pxp)
{
	if (__intel_uc_fw_status(&pxp->ctrl_gt->uc.huc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0) &&
	    __intel_uc_fw_status(&pxp->ctrl_gt->uc.gsc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;

	return false;
}

static bool pxp_fw_dependencies_completed(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return intel_pxp_gsccs_is_ready_for_sessions(pxp);

	return pxp_component_bound(pxp);
}

/*
 * This helper is used both by intel_pxp_start and by the GET_PARAM ioctl
 * that userspace calls, so the return values here must match the UAPI spec:
 * 1 when PXP is ready, 2 when the firmware dependencies are still pending,
 * and a negative errno when PXP is unsupported or misconfigured.
 */
int intel_pxp_get_readiness_status(struct intel_pxp *pxp, int timeout_ms)
{
	if (!intel_pxp_is_enabled(pxp))
		return -ENODEV;

	if (pxp_required_fw_failed(pxp))
		return -ENODEV;

	if (pxp->platform_cfg_is_bad)
		return -ENODEV;

	if (timeout_ms) {
		if (wait_for(pxp_fw_dependencies_completed(pxp), timeout_ms))
			return 2;
	} else if (!pxp_fw_dependencies_completed(pxp)) {
		return 2;
	}
	return 1;
}

/*
 * the arb session is restarted from the irq work when we receive the
 * termination completion interrupt
 */
#define PXP_READINESS_TIMEOUT 250

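/**
 * intel_pxp_start - start and wait for the arb session to become valid
 * @pxp: the pxp module handle
 *
 * Waits for the backend dependencies to be ready, then triggers a session
 * termination/restart and waits for the new arb session to come up.
 *
 * Returns 0 on success, -EIO if the session could not be (re)started, or a
 * negative errno if PXP is not available.
 */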
int intel_pxp_start(struct intel_pxp *pxp)
{
	int ret = 0;

	ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
	if (ret < 0) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
		return ret;
	} else if (ret > 1) {
		return -EIO; /* per UAPI spec, user may retry later */
	}

	mutex_lock(&pxp->arb_mutex);

	ret = __pxp_global_teardown_restart(pxp);
	if (ret)
		goto unlock;

	/* make sure the compiler doesn't optimize the double access */
	barrier();

	if (!pxp->arb_is_valid)
		ret = -EIO;

unlock:
	mutex_unlock(&pxp->arb_mutex);
	return ret;
}

void intel_pxp_init_hw(struct intel_pxp *pxp)
{
	kcr_pxp_enable(pxp);
	intel_pxp_irq_enable(pxp);
}

void intel_pxp_fini_hw(struct intel_pxp *pxp)
{
	kcr_pxp_disable(pxp);
	intel_pxp_irq_disable(pxp);
}

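/**
 * intel_pxp_key_check - check that an object's PXP key is still valid
 * @pxp: the pxp module handle
 * @obj: the GEM object to check
 * @assign: whether to assign the current key to a not-yet-encrypted object
 *
 * Returns 0 if the object can be used, -ENODEV if PXP is not active,
 * -EINVAL if the object is not a protected one and -ENOEXEC if the key the
 * object was encrypted with is no longer valid.
 */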
int intel_pxp_key_check(struct intel_pxp *pxp,
			struct drm_i915_gem_object *obj,
			bool assign)
{
	if (!intel_pxp_is_active(pxp))
		return -ENODEV;

	if (!i915_gem_object_is_protected(obj))
		return -EINVAL;

	GEM_BUG_ON(!pxp->key_instance);

	/*
	 * If this is the first time we're using this object, it's not
	 * encrypted yet; it will be encrypted with the current key, so mark it
	 * as such. If the object is already encrypted, check instead if the
	 * used key is still valid.
	 */
	if (!obj->pxp_key_instance && assign)
		obj->pxp_key_instance = pxp->key_instance;

	if (obj->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

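/**
 * intel_pxp_invalidate - ban all protected contexts after session teardown
 * @pxp: the pxp module handle
 *
 * Called when the PXP session is invalidated: walks the list of GEM contexts
 * and bans every context marked as using protected content, releasing the
 * runtime-pm wakeref each of them was holding.
 */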
void intel_pxp_invalidate(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_gem_context *ctx, *cn;

	/* ban all contexts marked as protected */
	spin_lock_irq(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		if (likely(!i915_gem_context_uses_protected_content(ctx))) {
			i915_gem_context_put(ctx);
			continue;
		}

		spin_unlock_irq(&i915->gem.contexts.lock);

		/*
		 * By the time we get here we are either going to suspend with
		 * quiesced execution or the HW keys are already long gone and
		 * in this case it is worthless to attempt to close the context
		 * and wait for its execution. It will hang the GPU if it has
		 * not already. So, as a fast mitigation, we can ban the
		 * context as quickly as we can. That might race with the
		 * execbuffer, but currently this is the best that can be done.
		 */
		for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
			intel_context_ban(ce, NULL);
		i915_gem_context_unlock_engines(ctx);

		/*
		 * The context has been banned, no need to keep the wakeref.
		 * This is safe from races because the only other place this
		 * is touched is context_release and we're holding a ctx ref.
		 */
		if (ctx->pxp_wakeref) {
			intel_runtime_pm_put(&i915->runtime_pm,
					     ctx->pxp_wakeref);
			ctx->pxp_wakeref = 0;
		}

		spin_lock_irq(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock_irq(&i915->gem.contexts.lock);
}