1 | /* SPDX-License-Identifier: MIT */ |
2 | /* |
3 | * Copyright © 2022 Intel Corporation |
4 | */ |
5 | |
6 | #ifndef __INTEL_GT_MCR__ |
7 | #define __INTEL_GT_MCR__ |
8 | |
9 | #include "intel_gt_types.h" |
10 | |
/* One-time setup of the GT's MCR steering state; call during GT init. */
void intel_gt_mcr_init(struct intel_gt *gt);

/*
 * Bracket a multi-access MCR steering critical section.  flags carries
 * saved IRQ state between lock and unlock (spin_lock_irqsave-style
 * calling convention -- NOTE(review): confirm against the .c file).
 * lock_sanitize presumably resets the lock to a known state after a
 * GT reset -- TODO confirm.
 */
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);
void intel_gt_mcr_lock_sanitize(struct intel_gt *gt);

/* Steered read from one specific (group, instance) of a multicast register. */
u32 intel_gt_mcr_read(struct intel_gt *gt,
		      i915_mcr_reg_t reg,
		      int group, int instance);
/*
 * Read from any non-terminated instance of reg.  The _fw variant, by the
 * usual i915 naming convention, assumes the caller already holds the
 * required forcewake -- NOTE(review): verify in the implementation.
 */
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg);
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg);

/* Steered write to one specific (group, instance) of a multicast register. */
void intel_gt_mcr_unicast_write(struct intel_gt *gt,
				i915_mcr_reg_t reg, u32 value,
				int group, int instance);
/* Write value to all instances of reg (_fw: caller holds forcewake). */
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
				  i915_mcr_reg_t reg, u32 value);
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
				     i915_mcr_reg_t reg, u32 value);

/* Read-modify-write across all instances: clear bits in clear, then OR in set. */
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
			       u32 clear, u32 set);

/* Report a (group, instance) steering target for reg that is not terminated. */
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
					     i915_mcr_reg_t reg,
					     u8 *group, u8 *instance);

/* Dump steering info to a drm_printer (dump_table: include the full table). */
void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
				  bool dump_table);

/* Translate a DSS index into the (group, instance) used to steer at it. */
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
				  unsigned int *group, unsigned int *instance);

/*
 * Poll a steered register until (val & mask) == value, with a fast busy-wait
 * phase (us) followed by a slow sleeping phase (ms) -- NOTE(review): naming
 * mirrors __intel_wait_for_register; confirm exact semantics in the .c file.
 * Returns 0 on success, negative errno on timeout (kernel convention).
 */
int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
			      i915_mcr_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms);
49 | |
/*
 * Helper for for_each_ss_steering loop.  On pre-Xe_HP platforms, subslice
 * presence is determined by using the group/instance as direct lookups in the
 * slice/subslice topology.  On Xe_HP and beyond, the steering is unrelated to
 * the topology, so we lookup the DSS ID directly in "slice 0."
 *
 * Every parameter use is parenthesized so that expression arguments
 * (e.g. a conditional or cast) bind correctly around '->' and ','.
 */
#define _HAS_SS(ss_, gt_, group_, instance_) ( \
	GRAPHICS_VER_FULL((gt_)->i915) >= IP_VER(12, 50) ? \
	intel_sseu_has_subslice(&(gt_)->info.sseu, 0, (ss_)) : \
	intel_sseu_has_subslice(&(gt_)->info.sseu, (group_), (instance_)))
60 | |
/*
 * Loop over each subslice/DSS and determine the group and instance IDs that
 * should be used to steer MCR accesses toward this DSS.
 *
 * ss_ runs from 0 to I915_MAX_SS_FUSE_BITS - 1; group_ and instance_ are
 * refreshed via intel_gt_mcr_get_ss_steering() before every iteration, and
 * the trailing for_each_if(_HAS_SS(...)) filter skips subslices that are
 * not present in the fused-out topology.  All four arguments must be plain
 * lvalue identifiers (they are assigned and address-taken by the macro).
 */
#define for_each_ss_steering(ss_, gt_, group_, instance_) \
	for (ss_ = 0, intel_gt_mcr_get_ss_steering(gt_, 0, &group_, &instance_); \
	     ss_ < I915_MAX_SS_FUSE_BITS; \
	     ss_++, intel_gt_mcr_get_ss_steering(gt_, ss_, &group_, &instance_)) \
		for_each_if(_HAS_SS(ss_, gt_, group_, instance_))
70 | |
71 | #endif /* __INTEL_GT_MCR__ */ |
72 | |