1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */ |
3 | #include "uncore.h" |
4 | #include "uncore_discovery.h" |
5 | |
6 | /* Uncore IMC PCI IDs */ |
7 | #define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 |
8 | #define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 |
9 | #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 |
10 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 |
11 | #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 |
12 | #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 |
13 | #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904 |
14 | #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c |
15 | #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900 |
16 | #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 |
17 | #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f |
18 | #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f |
19 | #define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918 |
20 | #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c |
21 | #define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 |
22 | #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 |
23 | #define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f |
24 | #define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f |
25 | #define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC 0x5910 |
26 | #define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC 0x5918 |
27 | #define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc |
28 | #define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0 |
29 | #define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10 |
30 | #define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4 |
31 | #define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f |
32 | #define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f |
33 | #define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2 |
34 | #define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30 |
35 | #define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18 |
36 | #define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6 |
37 | #define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31 |
38 | #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 |
39 | #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca |
40 | #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 |
41 | #define PCI_DEVICE_ID_INTEL_AML_YD_IMC 0x590c |
42 | #define PCI_DEVICE_ID_INTEL_AML_YQ_IMC 0x590d |
43 | #define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC 0x3ed0 |
44 | #define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC 0x3e34 |
45 | #define PCI_DEVICE_ID_INTEL_WHL_UD_IMC 0x3e35 |
46 | #define PCI_DEVICE_ID_INTEL_CML_H1_IMC 0x9b44 |
47 | #define PCI_DEVICE_ID_INTEL_CML_H2_IMC 0x9b54 |
48 | #define PCI_DEVICE_ID_INTEL_CML_H3_IMC 0x9b64 |
49 | #define PCI_DEVICE_ID_INTEL_CML_U1_IMC 0x9b51 |
50 | #define PCI_DEVICE_ID_INTEL_CML_U2_IMC 0x9b61 |
51 | #define PCI_DEVICE_ID_INTEL_CML_U3_IMC 0x9b71 |
52 | #define PCI_DEVICE_ID_INTEL_CML_S1_IMC 0x9b33 |
53 | #define PCI_DEVICE_ID_INTEL_CML_S2_IMC 0x9b43 |
54 | #define PCI_DEVICE_ID_INTEL_CML_S3_IMC 0x9b53 |
55 | #define PCI_DEVICE_ID_INTEL_CML_S4_IMC 0x9b63 |
56 | #define PCI_DEVICE_ID_INTEL_CML_S5_IMC 0x9b73 |
57 | #define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02 |
58 | #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12 |
59 | #define PCI_DEVICE_ID_INTEL_TGL_U1_IMC 0x9a02 |
60 | #define PCI_DEVICE_ID_INTEL_TGL_U2_IMC 0x9a04 |
61 | #define PCI_DEVICE_ID_INTEL_TGL_U3_IMC 0x9a12 |
62 | #define PCI_DEVICE_ID_INTEL_TGL_U4_IMC 0x9a14 |
63 | #define PCI_DEVICE_ID_INTEL_TGL_H_IMC 0x9a36 |
64 | #define PCI_DEVICE_ID_INTEL_RKL_1_IMC 0x4c43 |
65 | #define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53 |
66 | #define PCI_DEVICE_ID_INTEL_ADL_1_IMC 0x4660 |
67 | #define PCI_DEVICE_ID_INTEL_ADL_2_IMC 0x4641 |
68 | #define PCI_DEVICE_ID_INTEL_ADL_3_IMC 0x4601 |
69 | #define PCI_DEVICE_ID_INTEL_ADL_4_IMC 0x4602 |
70 | #define PCI_DEVICE_ID_INTEL_ADL_5_IMC 0x4609 |
71 | #define PCI_DEVICE_ID_INTEL_ADL_6_IMC 0x460a |
72 | #define PCI_DEVICE_ID_INTEL_ADL_7_IMC 0x4621 |
73 | #define PCI_DEVICE_ID_INTEL_ADL_8_IMC 0x4623 |
74 | #define PCI_DEVICE_ID_INTEL_ADL_9_IMC 0x4629 |
75 | #define PCI_DEVICE_ID_INTEL_ADL_10_IMC 0x4637 |
76 | #define PCI_DEVICE_ID_INTEL_ADL_11_IMC 0x463b |
77 | #define PCI_DEVICE_ID_INTEL_ADL_12_IMC 0x4648 |
78 | #define PCI_DEVICE_ID_INTEL_ADL_13_IMC 0x4649 |
79 | #define PCI_DEVICE_ID_INTEL_ADL_14_IMC 0x4650 |
80 | #define PCI_DEVICE_ID_INTEL_ADL_15_IMC 0x4668 |
81 | #define PCI_DEVICE_ID_INTEL_ADL_16_IMC 0x4670 |
82 | #define PCI_DEVICE_ID_INTEL_ADL_17_IMC 0x4614 |
83 | #define PCI_DEVICE_ID_INTEL_ADL_18_IMC 0x4617 |
84 | #define PCI_DEVICE_ID_INTEL_ADL_19_IMC 0x4618 |
85 | #define PCI_DEVICE_ID_INTEL_ADL_20_IMC 0x461B |
86 | #define PCI_DEVICE_ID_INTEL_ADL_21_IMC 0x461C |
/*
 * Raptor Lake IMC device IDs.
 *
 * NOTE(review): RPL_3/RPL_9 both define 0xA706 and RPL_4/RPL_12 both
 * define 0xA709.  Duplicate IDs in a pci_device_id table are benign
 * (first match wins) but look unintentional — verify against the
 * platform datasheet.
 */
#define PCI_DEVICE_ID_INTEL_RPL_1_IMC		0xA700
#define PCI_DEVICE_ID_INTEL_RPL_2_IMC		0xA702
#define PCI_DEVICE_ID_INTEL_RPL_3_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_4_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_5_IMC		0xA701
#define PCI_DEVICE_ID_INTEL_RPL_6_IMC		0xA703
#define PCI_DEVICE_ID_INTEL_RPL_7_IMC		0xA704
#define PCI_DEVICE_ID_INTEL_RPL_8_IMC		0xA705
#define PCI_DEVICE_ID_INTEL_RPL_9_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_10_IMC		0xA707
#define PCI_DEVICE_ID_INTEL_RPL_11_IMC		0xA708
#define PCI_DEVICE_ID_INTEL_RPL_12_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_13_IMC		0xA70a
#define PCI_DEVICE_ID_INTEL_RPL_14_IMC		0xA70b
#define PCI_DEVICE_ID_INTEL_RPL_15_IMC		0xA715
#define PCI_DEVICE_ID_INTEL_RPL_16_IMC		0xA716
#define PCI_DEVICE_ID_INTEL_RPL_17_IMC		0xA717
#define PCI_DEVICE_ID_INTEL_RPL_18_IMC		0xA718
#define PCI_DEVICE_ID_INTEL_RPL_19_IMC		0xA719
#define PCI_DEVICE_ID_INTEL_RPL_20_IMC		0xA71A
#define PCI_DEVICE_ID_INTEL_RPL_21_IMC		0xA71B
#define PCI_DEVICE_ID_INTEL_RPL_22_IMC		0xA71C
#define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
#define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
#define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A
112 | #define PCI_DEVICE_ID_INTEL_MTL_1_IMC 0x7d00 |
113 | #define PCI_DEVICE_ID_INTEL_MTL_2_IMC 0x7d01 |
114 | #define PCI_DEVICE_ID_INTEL_MTL_3_IMC 0x7d02 |
115 | #define PCI_DEVICE_ID_INTEL_MTL_4_IMC 0x7d05 |
116 | #define PCI_DEVICE_ID_INTEL_MTL_5_IMC 0x7d10 |
117 | #define PCI_DEVICE_ID_INTEL_MTL_6_IMC 0x7d14 |
118 | #define PCI_DEVICE_ID_INTEL_MTL_7_IMC 0x7d15 |
119 | #define PCI_DEVICE_ID_INTEL_MTL_8_IMC 0x7d16 |
120 | #define PCI_DEVICE_ID_INTEL_MTL_9_IMC 0x7d21 |
121 | #define PCI_DEVICE_ID_INTEL_MTL_10_IMC 0x7d22 |
122 | #define PCI_DEVICE_ID_INTEL_MTL_11_IMC 0x7d23 |
123 | #define PCI_DEVICE_ID_INTEL_MTL_12_IMC 0x7d24 |
124 | #define PCI_DEVICE_ID_INTEL_MTL_13_IMC 0x7d28 |
125 | |
126 | |
/*
 * Build one pci_device_id table entry for a client IMC device; all
 * such devices map to the single SNB_PCI_UNCORE_IMC uncore type.
 */
#define IMC_UNCORE_DEV(a)						\
{									\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_##a##_IMC),	\
	.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),	\
}
132 | |
133 | /* SNB event control */ |
134 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff |
135 | #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 |
136 | #define SNB_UNC_CTL_EDGE_DET (1 << 18) |
137 | #define SNB_UNC_CTL_EN (1 << 22) |
138 | #define SNB_UNC_CTL_INVERT (1 << 23) |
139 | #define SNB_UNC_CTL_CMASK_MASK 0x1f000000 |
140 | #define NHM_UNC_CTL_CMASK_MASK 0xff000000 |
141 | #define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) |
142 | |
143 | #define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ |
144 | SNB_UNC_CTL_UMASK_MASK | \ |
145 | SNB_UNC_CTL_EDGE_DET | \ |
146 | SNB_UNC_CTL_INVERT | \ |
147 | SNB_UNC_CTL_CMASK_MASK) |
148 | |
149 | #define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ |
150 | SNB_UNC_CTL_UMASK_MASK | \ |
151 | SNB_UNC_CTL_EDGE_DET | \ |
152 | SNB_UNC_CTL_INVERT | \ |
153 | NHM_UNC_CTL_CMASK_MASK) |
154 | |
155 | /* SNB global control register */ |
156 | #define SNB_UNC_PERF_GLOBAL_CTL 0x391 |
157 | #define SNB_UNC_FIXED_CTR_CTRL 0x394 |
158 | #define SNB_UNC_FIXED_CTR 0x395 |
159 | |
160 | /* SNB uncore global control */ |
161 | #define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) |
162 | #define SNB_UNC_GLOBAL_CTL_EN (1 << 29) |
163 | |
164 | /* SNB Cbo register */ |
165 | #define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 |
166 | #define SNB_UNC_CBO_0_PER_CTR0 0x706 |
167 | #define SNB_UNC_CBO_MSR_OFFSET 0x10 |
168 | |
169 | /* SNB ARB register */ |
170 | #define SNB_UNC_ARB_PER_CTR0 0x3b0 |
171 | #define SNB_UNC_ARB_PERFEVTSEL0 0x3b2 |
172 | #define SNB_UNC_ARB_MSR_OFFSET 0x10 |
173 | |
174 | /* NHM global control register */ |
175 | #define NHM_UNC_PERF_GLOBAL_CTL 0x391 |
176 | #define NHM_UNC_FIXED_CTR 0x394 |
177 | #define NHM_UNC_FIXED_CTR_CTRL 0x395 |
178 | |
179 | /* NHM uncore global control */ |
180 | #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) |
181 | #define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) |
182 | |
183 | /* NHM uncore register */ |
184 | #define NHM_UNC_PERFEVTSEL0 0x3c0 |
185 | #define NHM_UNC_UNCORE_PMC0 0x3b0 |
186 | |
187 | /* SKL uncore global control */ |
188 | #define SKL_UNC_PERF_GLOBAL_CTL 0xe01 |
189 | #define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1) |
190 | |
191 | /* ICL Cbo register */ |
192 | #define ICL_UNC_CBO_CONFIG 0x396 |
193 | #define ICL_UNC_NUM_CBO_MASK 0xf |
194 | #define ICL_UNC_CBO_0_PER_CTR0 0x702 |
195 | #define ICL_UNC_CBO_MSR_OFFSET 0x8 |
196 | |
197 | /* ICL ARB register */ |
198 | #define ICL_UNC_ARB_PER_CTR 0x3b1 |
199 | #define ICL_UNC_ARB_PERFEVTSEL 0x3b3 |
200 | |
201 | /* ADL uncore global control */ |
202 | #define ADL_UNC_PERF_GLOBAL_CTL 0x2ff0 |
203 | #define ADL_UNC_FIXED_CTR_CTRL 0x2fde |
204 | #define ADL_UNC_FIXED_CTR 0x2fdf |
205 | |
206 | /* ADL Cbo register */ |
207 | #define ADL_UNC_CBO_0_PER_CTR0 0x2002 |
208 | #define ADL_UNC_CBO_0_PERFEVTSEL0 0x2000 |
209 | #define ADL_UNC_CTL_THRESHOLD 0x3f000000 |
210 | #define ADL_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ |
211 | SNB_UNC_CTL_UMASK_MASK | \ |
212 | SNB_UNC_CTL_EDGE_DET | \ |
213 | SNB_UNC_CTL_INVERT | \ |
214 | ADL_UNC_CTL_THRESHOLD) |
215 | |
216 | /* ADL ARB register */ |
217 | #define ADL_UNC_ARB_PER_CTR0 0x2FD2 |
218 | #define ADL_UNC_ARB_PERFEVTSEL0 0x2FD0 |
219 | #define ADL_UNC_ARB_MSR_OFFSET 0x8 |
220 | |
221 | /* MTL Cbo register */ |
222 | #define MTL_UNC_CBO_0_PER_CTR0 0x2448 |
223 | #define MTL_UNC_CBO_0_PERFEVTSEL0 0x2442 |
224 | |
225 | /* MTL HAC_ARB register */ |
226 | #define MTL_UNC_HAC_ARB_CTR 0x2018 |
227 | #define MTL_UNC_HAC_ARB_CTRL 0x2012 |
228 | |
229 | /* MTL ARB register */ |
230 | #define MTL_UNC_ARB_CTR 0x2418 |
231 | #define MTL_UNC_ARB_CTRL 0x2412 |
232 | |
233 | /* MTL cNCU register */ |
234 | #define MTL_UNC_CNCU_FIXED_CTR 0x2408 |
235 | #define MTL_UNC_CNCU_FIXED_CTRL 0x2402 |
236 | #define MTL_UNC_CNCU_BOX_CTL 0x240e |
237 | |
238 | /* MTL sNCU register */ |
239 | #define MTL_UNC_SNCU_FIXED_CTR 0x2008 |
240 | #define MTL_UNC_SNCU_FIXED_CTRL 0x2002 |
241 | #define MTL_UNC_SNCU_BOX_CTL 0x200e |
242 | |
243 | /* MTL HAC_CBO register */ |
244 | #define MTL_UNC_HBO_CTR 0x2048 |
245 | #define MTL_UNC_HBO_CTRL 0x2042 |
246 | |
247 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7" ); |
248 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15" ); |
249 | DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11" ); |
250 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18" ); |
251 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23" ); |
252 | DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28" ); |
253 | DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31" ); |
254 | DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29" ); |
255 | |
256 | /* Sandy Bridge uncore support */ |
257 | static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
258 | { |
259 | struct hw_perf_event *hwc = &event->hw; |
260 | |
261 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) |
262 | wrmsrl(msr: hwc->config_base, val: hwc->config | SNB_UNC_CTL_EN); |
263 | else |
264 | wrmsrl(msr: hwc->config_base, SNB_UNC_CTL_EN); |
265 | } |
266 | |
267 | static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) |
268 | { |
269 | wrmsrl(msr: event->hw.config_base, val: 0); |
270 | } |
271 | |
/*
 * Global uncore enable is shared across all boxes; only the first PMU
 * instance (pmu_idx == 0) writes it, to avoid redundant MSR writes.
 */
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

/* Re-assert the global enable + all-cores bits unconditionally. */
static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}
285 | |
286 | static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) |
287 | { |
288 | if (box->pmu->pmu_idx == 0) |
289 | wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, val: 0); |
290 | } |
291 | |
/* Named event aliases exported under events/ in sysfs. */
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

/* config-field layout exported under format/ in sysfs. */
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,	/* SNB cmask is 5 bits (config:24-28) */
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

/* MSR-based box operations shared by the SNB cbox and arb types. */
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/* ARB events 0x80 and 0x83 may only run on counter 0. */
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
325 | |
/*
 * SNB C-Box PMU type: up to 4 boxes, each with two 44-bit general
 * counters plus one shared 48-bit fixed counter (single_fixed).
 * Boxes are addressed at SNB_UNC_CBO_MSR_OFFSET strides.
 */
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
343 | |
/* SNB ARB PMU type: one box, two 44-bit counters, no fixed counter. */
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

/* NULL-terminated list of MSR uncore types registered for SNB. */
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
363 | |
364 | void snb_uncore_cpu_init(void) |
365 | { |
366 | uncore_msr_uncores = snb_msr_uncores; |
367 | if (snb_uncore_cbox.num_boxes > topology_num_cores_per_package()) |
368 | snb_uncore_cbox.num_boxes = topology_num_cores_per_package(); |
369 | } |
370 | |
/*
 * SKL variant of box init: shared global enable is written once by the
 * first PMU instance; the 8th C-Box (pmu_idx == 7) lives in a different
 * MSR range, flagged here so the generic code picks the right offsets.
 */
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

/* Re-assert the SKL global enable + all-cores bits unconditionally. */
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}
388 | |
389 | static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) |
390 | { |
391 | if (box->pmu->pmu_idx == 0) |
392 | wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, val: 0); |
393 | } |
394 | |
/* SKL box ops: SKL-specific box control, SNB per-event handlers. */
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/*
 * SKL C-Box: up to 8 boxes with 4 counters each; reuses the SNB MSR
 * layout and format group.
 */
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

/* SKL registers its own cbox but shares the SNB arb type. */
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
427 | |
428 | void skl_uncore_cpu_init(void) |
429 | { |
430 | uncore_msr_uncores = skl_msr_uncores; |
431 | if (skl_uncore_cbox.num_boxes > topology_num_cores_per_package()) |
432 | skl_uncore_cbox.num_boxes = topology_num_cores_per_package(); |
433 | snb_uncore_arb.ops = &skl_uncore_msr_ops; |
434 | } |
435 | |
/*
 * ICL box ops: no box-level init/enable/exit hooks — only per-event
 * enable/disable and counter reads.
 */
static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/*
 * ICL C-Box: num_boxes is filled in at runtime by icl_uncore_cpu_init()
 * from the ICL_UNC_CBO_CONFIG MSR; no fixed counter on this type.
 */
static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
453 | |
/* ICL clockticks alias (no umask, unlike the SNB encoding). */
static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

/* The clock box only accepts an event code — expose just "event". */
static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};

/*
 * ICL clock box: a single 48-bit fixed counter reusing the SNB fixed
 * counter MSRs; no general-purpose counters.
 */
static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};
482 | |
/* ICL ARB: one box, one 44-bit counter; no msr_offset needed. */
static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

/* NULL-terminated list of MSR uncore types registered for ICL. */
static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
501 | |
502 | static int icl_get_cbox_num(void) |
503 | { |
504 | u64 num_boxes; |
505 | |
506 | rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes); |
507 | |
508 | return num_boxes & ICL_UNC_NUM_CBO_MASK; |
509 | } |
510 | |
/* Register the ICL uncore types; C-Box count is probed from an MSR. */
void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}
516 | |
/* TGL reuses the ICL cbox/clockbox but the SNB arb type. */
static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
523 | |
/*
 * RKL/TGL box init: only the global enable bit is set (no per-core
 * mask, unlike SKL); first PMU instance only.
 */
static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
529 | |
/*
 * Register the TGL uncore types.  The shared ICL/SNB types are
 * repointed at skl_uncore_msr_ops, whose init_box hook is in turn
 * swapped for the RKL/TGL variant — so the order of these assignments
 * matters.
 */
void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}
539 | |
/* ADL box init: set the global enable bit; first PMU instance only. */
static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

/* Re-assert the ADL global enable bit unconditionally. */
static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
550 | |
551 | static void adl_uncore_msr_disable_box(struct intel_uncore_box *box) |
552 | { |
553 | if (box->pmu->pmu_idx == 0) |
554 | wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, val: 0); |
555 | } |
556 | |
557 | static void adl_uncore_msr_exit_box(struct intel_uncore_box *box) |
558 | { |
559 | if (box->pmu->pmu_idx == 0) |
560 | wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, val: 0); |
561 | } |
562 | |
/* ADL box ops: ADL box control, SNB per-event handlers. */
static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box	= adl_uncore_msr_init_box,
	.enable_box	= adl_uncore_msr_enable_box,
	.disable_box	= adl_uncore_msr_disable_box,
	.exit_box	= adl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/* ADL replaces the 5-bit cmask with a 6-bit "threshold" field. */
static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,
	NULL,
};

static const struct attribute_group adl_uncore_format_group = {
	.name = "format",
	.attrs = adl_uncore_formats_attr,
};
586 | |
/*
 * ADL C-Box: num_boxes is filled in at runtime by adl_uncore_cpu_init()
 * from the ICL_UNC_CBO_CONFIG MSR.
 */
static struct intel_uncore_type adl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

/* ADL ARB: two boxes; keeps the SNB event mask and arb constraints. */
static struct intel_uncore_type adl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

/* ADL clock box: single 48-bit fixed counter at ADL-specific MSRs. */
static struct intel_uncore_type adl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= ADL_UNC_FIXED_CTR,
	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &adl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

/* NULL-terminated list of MSR uncore types registered for ADL. */
static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};
633 | |
/* Probe the ADL C-Box count and register the ADL uncore types. */
void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}
639 | |
/*
 * MTL C-Box: num_boxes is filled in at runtime by mtl_uncore_cpu_init();
 * counters widen to 48 bits on this generation.
 */
static struct intel_uncore_type mtl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= MTL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

/* MTL HAC ARB: two boxes, two 48-bit counters each. */
static struct intel_uncore_type mtl_uncore_hac_arb = {
	.name		= "hac_arb",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_HAC_ARB_CTR,
	.event_ctl	= MTL_UNC_HAC_ARB_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

/* MTL ARB: same layout as hac_arb at different MSR addresses. */
static struct intel_uncore_type mtl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_ARB_CTR,
	.event_ctl	= MTL_UNC_ARB_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

/* MTL HAC C-Box: same layout again at the HBO MSR addresses. */
static struct intel_uncore_type mtl_uncore_hac_cbox = {
	.name		= "hac_cbox",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_HBO_CTR,
	.event_ctl	= MTL_UNC_HBO_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};
690 | |
691 | static void mtl_uncore_msr_init_box(struct intel_uncore_box *box) |
692 | { |
693 | wrmsrl(msr: uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN); |
694 | } |
695 | |
/* MTL NCU ops: per-box init via box_ctl, SNB per-event handlers. */
static struct intel_uncore_ops mtl_uncore_msr_ops = {
	.init_box	= mtl_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/* MTL compute-die NCU: single 48-bit fixed (clock) counter. */
static struct intel_uncore_type mtl_uncore_cncu = {
	.name		= "cncu",
	.num_counters	= 1,
	.num_boxes	= 1,
	.box_ctl	= MTL_UNC_CNCU_BOX_CTL,
	.fixed_ctr_bits = 48,
	.fixed_ctr	= MTL_UNC_CNCU_FIXED_CTR,
	.fixed_ctl	= MTL_UNC_CNCU_FIXED_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &mtl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

/* MTL SoC-die NCU: same shape as cncu at the sNCU MSR addresses. */
static struct intel_uncore_type mtl_uncore_sncu = {
	.name		= "sncu",
	.num_counters	= 1,
	.num_boxes	= 1,
	.box_ctl	= MTL_UNC_SNCU_BOX_CTL,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= MTL_UNC_SNCU_FIXED_CTR,
	.fixed_ctl	= MTL_UNC_SNCU_FIXED_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &mtl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

/* NULL-terminated list of MSR uncore types registered for MTL. */
static struct intel_uncore_type *mtl_msr_uncores[] = {
	&mtl_uncore_cbox,
	&mtl_uncore_hac_arb,
	&mtl_uncore_arb,
	&mtl_uncore_hac_cbox,
	&mtl_uncore_cncu,
	&mtl_uncore_sncu,
	NULL
};

/* Probe the MTL C-Box count and register the MTL uncore types. */
void mtl_uncore_cpu_init(void)
{
	mtl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = mtl_msr_uncores;
}
748 | |
/* Index into the PCI uncore type table (used by IMC_UNCORE_DEV). */
enum {
	SNB_PCI_UNCORE_IMC,
};
752 | |
/*
 * IMC free-running counter events.  The scale 6.103515625e-5 converts
 * 64-byte cacheline counts to MiB (64 / 2^20).
 */
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
776 | |
777 | #define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff |
778 | #define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48 |
779 | |
780 | /* page size multiple covering all config regs */ |
781 | #define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000 |
782 | |
783 | #define SNB_UNCORE_PCI_IMC_DATA_READS 0x1 |
784 | #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050 |
785 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2 |
786 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 |
787 | #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE |
788 | |
789 | /* BW break down- legacy counters */ |
790 | #define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3 |
791 | #define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040 |
792 | #define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4 |
793 | #define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044 |
794 | #define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5 |
795 | #define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048 |
796 | |
/* Indices into snb_uncore_imc_freerunning[], one per event above. */
enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
806 | |
807 | static struct freerunning_counters snb_uncore_imc_freerunning[] = { |
808 | [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, |
809 | 0x0, 0x0, 1, 32 }, |
810 | [SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE, |
811 | .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 32 }, |
812 | [SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE, |
813 | .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 32 }, |
814 | [SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE, |
815 | .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 32 }, |
816 | [SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE, |
817 | .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 32 }, |
818 | }; |
819 | |
/* IMC events are selected by event code only — expose just "event". */
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
829 | |
830 | static void snb_uncore_imc_init_box(struct intel_uncore_box *box) |
831 | { |
832 | struct intel_uncore_type *type = box->pmu->type; |
833 | struct pci_dev *pdev = box->pci_dev; |
834 | int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET; |
835 | resource_size_t addr; |
836 | u32 pci_dword; |
837 | |
838 | pci_read_config_dword(dev: pdev, where, val: &pci_dword); |
839 | addr = pci_dword; |
840 | |
841 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
842 | pci_read_config_dword(dev: pdev, where: where + 4, val: &pci_dword); |
843 | addr |= ((resource_size_t)pci_dword << 32); |
844 | #endif |
845 | |
846 | addr &= ~(PAGE_SIZE - 1); |
847 | |
848 | box->io_addr = ioremap(offset: addr, size: type->mmio_map_size); |
849 | if (!box->io_addr) |
850 | pr_warn("perf uncore: Failed to ioremap for %s.\n" , type->name); |
851 | |
852 | box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL; |
853 | } |
854 | |
/*
 * SNB IMC counters are free running (see snb_uncore_imc_freerunning),
 * so there is nothing to enable or disable at box or event level:
 * these uncore ops are intentionally empty.
 */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
866 | |
867 | /* |
868 | * Keep the custom event_init() function compatible with old event |
869 | * encoding for free running counters. |
870 | */ |
871 | static int snb_uncore_imc_event_init(struct perf_event *event) |
872 | { |
873 | struct intel_uncore_pmu *pmu; |
874 | struct intel_uncore_box *box; |
875 | struct hw_perf_event *hwc = &event->hw; |
876 | u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; |
877 | int idx, base; |
878 | |
879 | if (event->attr.type != event->pmu->type) |
880 | return -ENOENT; |
881 | |
882 | pmu = uncore_event_to_pmu(event); |
883 | /* no device found for this pmu */ |
884 | if (pmu->func_id < 0) |
885 | return -ENOENT; |
886 | |
887 | /* Sampling not supported yet */ |
888 | if (hwc->sample_period) |
889 | return -EINVAL; |
890 | |
891 | /* unsupported modes and filters */ |
892 | if (event->attr.sample_period) /* no sampling */ |
893 | return -EINVAL; |
894 | |
895 | /* |
896 | * Place all uncore events for a particular physical package |
897 | * onto a single cpu |
898 | */ |
899 | if (event->cpu < 0) |
900 | return -EINVAL; |
901 | |
902 | /* check only supported bits are set */ |
903 | if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) |
904 | return -EINVAL; |
905 | |
906 | box = uncore_pmu_to_box(pmu, cpu: event->cpu); |
907 | if (!box || box->cpu < 0) |
908 | return -EINVAL; |
909 | |
910 | event->cpu = box->cpu; |
911 | event->pmu_private = box; |
912 | |
913 | event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; |
914 | |
915 | event->hw.idx = -1; |
916 | event->hw.last_tag = ~0ULL; |
917 | event->hw.extra_reg.idx = EXTRA_REG_NONE; |
918 | event->hw.branch_reg.idx = EXTRA_REG_NONE; |
919 | /* |
920 | * check event is known (whitelist, determines counter) |
921 | */ |
922 | switch (cfg) { |
923 | case SNB_UNCORE_PCI_IMC_DATA_READS: |
924 | base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; |
925 | idx = UNCORE_PMC_IDX_FREERUNNING; |
926 | break; |
927 | case SNB_UNCORE_PCI_IMC_DATA_WRITES: |
928 | base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; |
929 | idx = UNCORE_PMC_IDX_FREERUNNING; |
930 | break; |
931 | case SNB_UNCORE_PCI_IMC_GT_REQUESTS: |
932 | base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE; |
933 | idx = UNCORE_PMC_IDX_FREERUNNING; |
934 | break; |
935 | case SNB_UNCORE_PCI_IMC_IA_REQUESTS: |
936 | base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE; |
937 | idx = UNCORE_PMC_IDX_FREERUNNING; |
938 | break; |
939 | case SNB_UNCORE_PCI_IMC_IO_REQUESTS: |
940 | base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE; |
941 | idx = UNCORE_PMC_IDX_FREERUNNING; |
942 | break; |
943 | default: |
944 | return -EINVAL; |
945 | } |
946 | |
947 | /* must be done before validate_group */ |
948 | event->hw.event_base = base; |
949 | event->hw.idx = idx; |
950 | |
951 | /* Convert to standard encoding format for freerunning counters */ |
952 | event->hw.config = ((cfg - 1) << 8) | 0x10ff; |
953 | |
954 | /* no group validation needed, we have free running counters */ |
955 | |
956 | return 0; |
957 | } |
958 | |
/* All hw config was done in event_init(); nothing left to do here. */
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
963 | |
964 | int snb_pci2phy_map_init(int devid) |
965 | { |
966 | struct pci_dev *dev = NULL; |
967 | struct pci2phy_map *map; |
968 | int bus, segment; |
969 | |
970 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, device: devid, from: dev); |
971 | if (!dev) |
972 | return -ENOTTY; |
973 | |
974 | bus = dev->bus->number; |
975 | segment = pci_domain_nr(bus: dev->bus); |
976 | |
977 | raw_spin_lock(&pci2phy_map_lock); |
978 | map = __find_pci2phy_map(segment); |
979 | if (!map) { |
980 | raw_spin_unlock(&pci2phy_map_lock); |
981 | pci_dev_put(dev); |
982 | return -ENOMEM; |
983 | } |
984 | map->pbus_to_dieid[bus] = 0; |
985 | raw_spin_unlock(&pci2phy_map_lock); |
986 | |
987 | pci_dev_put(dev); |
988 | |
989 | return 0; |
990 | } |
991 | |
992 | static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) |
993 | { |
994 | struct hw_perf_event *hwc = &event->hw; |
995 | |
996 | /* |
997 | * SNB IMC counters are 32-bit and are laid out back to back |
998 | * in MMIO space. Therefore we must use a 32-bit accessor function |
999 | * using readq() from uncore_mmio_read_counter() causes problems |
1000 | * because it is reading 64-bit at a time. This is okay for the |
1001 | * uncore_perf_event_update() function because it drops the upper |
1002 | * 32-bits but not okay for plain uncore_read_counter() as invoked |
1003 | * in uncore_pmu_event_start(). |
1004 | */ |
1005 | return (u64)readl(addr: box->io_addr + hwc->event_base); |
1006 | } |
1007 | |
/*
 * Custom struct pmu for the SNB IMC: shares the generic uncore add/del/
 * start/stop/read paths but uses the legacy-compatible event_init above.
 */
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

/* Box-level ops: MMIO map/unmap plus the no-op enable/disable stubs. */
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};
1029 | |
/* The SNB client IMC uncore type: one box, five free-running counter types. */
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc" ,
	.num_counters   = 5,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size	= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

/* NULL-terminated list of PCI uncore types exposed on SNB-class clients. */
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
1047 | |
/* Per-generation IMC PCI ID tables (SNB, IVB, HSW, BDW), zero-terminated. */
static const struct pci_device_id snb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SNB),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(IVB),
	IMC_UNCORE_DEV(IVB_E3),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(HSW),
	IMC_UNCORE_DEV(HSW_U),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(BDW),
	{ /* end: all zeroes */ },
};
1069 | |
/*
 * SKL-era IMC PCI IDs: covers SKL, KBL, CFL, AML, WHL and CML variants,
 * which all share the SKL uncore driver. Zero-terminated.
 */
static const struct pci_device_id skl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SKL_Y),
	IMC_UNCORE_DEV(SKL_U),
	IMC_UNCORE_DEV(SKL_HD),
	IMC_UNCORE_DEV(SKL_HQ),
	IMC_UNCORE_DEV(SKL_SD),
	IMC_UNCORE_DEV(SKL_SQ),
	IMC_UNCORE_DEV(SKL_E3),
	IMC_UNCORE_DEV(KBL_Y),
	IMC_UNCORE_DEV(KBL_U),
	IMC_UNCORE_DEV(KBL_UQ),
	IMC_UNCORE_DEV(KBL_SD),
	IMC_UNCORE_DEV(KBL_SQ),
	IMC_UNCORE_DEV(KBL_HQ),
	IMC_UNCORE_DEV(KBL_WQ),
	IMC_UNCORE_DEV(CFL_2U),
	IMC_UNCORE_DEV(CFL_4U),
	IMC_UNCORE_DEV(CFL_4H),
	IMC_UNCORE_DEV(CFL_6H),
	IMC_UNCORE_DEV(CFL_2S_D),
	IMC_UNCORE_DEV(CFL_4S_D),
	IMC_UNCORE_DEV(CFL_6S_D),
	IMC_UNCORE_DEV(CFL_8S_D),
	IMC_UNCORE_DEV(CFL_4S_W),
	IMC_UNCORE_DEV(CFL_6S_W),
	IMC_UNCORE_DEV(CFL_8S_W),
	IMC_UNCORE_DEV(CFL_4S_S),
	IMC_UNCORE_DEV(CFL_6S_S),
	IMC_UNCORE_DEV(CFL_8S_S),
	IMC_UNCORE_DEV(AML_YD),
	IMC_UNCORE_DEV(AML_YQ),
	IMC_UNCORE_DEV(WHL_UQ),
	IMC_UNCORE_DEV(WHL_4_UQ),
	IMC_UNCORE_DEV(WHL_UD),
	IMC_UNCORE_DEV(CML_H1),
	IMC_UNCORE_DEV(CML_H2),
	IMC_UNCORE_DEV(CML_H3),
	IMC_UNCORE_DEV(CML_U1),
	IMC_UNCORE_DEV(CML_U2),
	IMC_UNCORE_DEV(CML_U3),
	IMC_UNCORE_DEV(CML_S1),
	IMC_UNCORE_DEV(CML_S2),
	IMC_UNCORE_DEV(CML_S3),
	IMC_UNCORE_DEV(CML_S4),
	IMC_UNCORE_DEV(CML_S5),
	{ /* end: all zeroes */ },
};
1117 | |
/* ICL/RKL IMC PCI IDs, zero-terminated. */
static const struct pci_device_id icl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(ICL_U),
	IMC_UNCORE_DEV(ICL_U2),
	IMC_UNCORE_DEV(RKL_1),
	IMC_UNCORE_DEV(RKL_2),
	{ /* end: all zeroes */ },
};
1125 | |
/*
 * Per-generation pci_driver shells: only the id_table is used (the core
 * uncore code matches devices itself; no probe/remove callbacks here).
 */
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore" ,
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore" ,
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore" ,
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore" ,
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore" ,
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore" ,
	.id_table	= icl_uncore_pci_ids,
};
1155 | |
/* Pairs an IMC PCI device ID with the pci_driver that should service it. */
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
/* Shorthand for building imc_uncore_pci_dev table entries. */
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
1162 | |
/*
 * Master probe table: every known client IMC device ID mapped to its
 * driver. imc_uncore_find_dev() walks this until one ID is present.
 * Terminated by an entry with pci_id == 0.
 */
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),  /* 2nd Gen Core processor */
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),  /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),  /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{  /* end marker */ }
};
1220 | |
1221 | |
/* Walk an imc_uncore_pci_dev table until the zero pci_id end marker. */
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
1224 | |
1225 | static struct pci_driver *imc_uncore_find_dev(void) |
1226 | { |
1227 | const struct imc_uncore_pci_dev *p; |
1228 | int ret; |
1229 | |
1230 | for_each_imc_pci_id(p, desktop_imc_pci_ids) { |
1231 | ret = snb_pci2phy_map_init(devid: p->pci_id); |
1232 | if (ret == 0) |
1233 | return p->driver; |
1234 | } |
1235 | return NULL; |
1236 | } |
1237 | |
1238 | static int imc_uncore_pci_init(void) |
1239 | { |
1240 | struct pci_driver *imc_drv = imc_uncore_find_dev(); |
1241 | |
1242 | if (!imc_drv) |
1243 | return -ENODEV; |
1244 | |
1245 | uncore_pci_uncores = snb_pci_uncores; |
1246 | uncore_pci_driver = imc_drv; |
1247 | |
1248 | return 0; |
1249 | } |
1250 | |
/*
 * Per-generation entry points called by the uncore core; SNB through SKL
 * client parts all share the same init path — the only difference is
 * which PCI ID table imc_uncore_find_dev() ends up matching.
 */
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
1274 | |
1275 | /* end of Sandy Bridge uncore support */ |
1276 | |
1277 | /* Nehalem uncore support */ |
1278 | static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) |
1279 | { |
1280 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, val: 0); |
1281 | } |
1282 | |
/* Globally enable the NHM uncore counters (per the EN_PC_ALL/EN_FC bits). */
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
1287 | |
1288 | static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
1289 | { |
1290 | struct hw_perf_event *hwc = &event->hw; |
1291 | |
1292 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) |
1293 | wrmsrl(msr: hwc->config_base, val: hwc->config | SNB_UNC_CTL_EN); |
1294 | else |
1295 | wrmsrl(msr: hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); |
1296 | } |
1297 | |
/* sysfs "format" fields accepted by the NHM uncore PMU. */
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format" ,
	.attrs = nhm_uncore_formats_attr,
};

/* Named NHM uncore events exported via sysfs (event/umask encodings). */
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00" ),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f" ),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f" ),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01" ),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02" ),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04" ),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08" ),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10" ),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20" ),
	{ /* end: all zeroes */ },
};
1324 | |
/* MSR-based counter ops for the NHM uncore (shares SNB's disable path). */
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/*
 * The NHM uncore type: 8 general 48-bit counters plus one fixed counter,
 * all accessed via MSRs. Empty name keeps the legacy sysfs naming.
 */
static struct intel_uncore_type nhm_uncore = {
	.name		= "" ,
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
1348 | |
/* NULL-terminated list of NHM MSR uncore types. */
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

/* Register the NHM MSR uncores with the core uncore code. */
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}
1358 | |
1359 | /* end of Nehalem uncore support */ |
1360 | |
1361 | /* Tiger Lake MMIO uncore support */ |
1362 | |
/*
 * TGL/ADL/RPL/MTL IMC PCI IDs; used by tgl_uncore_get_mc_dev() to locate
 * the memory controller device whose MCHBAR hosts the counters.
 * Zero-terminated.
 */
static const struct pci_device_id tgl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(TGL_U1),
	IMC_UNCORE_DEV(TGL_U2),
	IMC_UNCORE_DEV(TGL_U3),
	IMC_UNCORE_DEV(TGL_U4),
	IMC_UNCORE_DEV(TGL_H),
	IMC_UNCORE_DEV(ADL_1),
	IMC_UNCORE_DEV(ADL_2),
	IMC_UNCORE_DEV(ADL_3),
	IMC_UNCORE_DEV(ADL_4),
	IMC_UNCORE_DEV(ADL_5),
	IMC_UNCORE_DEV(ADL_6),
	IMC_UNCORE_DEV(ADL_7),
	IMC_UNCORE_DEV(ADL_8),
	IMC_UNCORE_DEV(ADL_9),
	IMC_UNCORE_DEV(ADL_10),
	IMC_UNCORE_DEV(ADL_11),
	IMC_UNCORE_DEV(ADL_12),
	IMC_UNCORE_DEV(ADL_13),
	IMC_UNCORE_DEV(ADL_14),
	IMC_UNCORE_DEV(ADL_15),
	IMC_UNCORE_DEV(ADL_16),
	IMC_UNCORE_DEV(ADL_17),
	IMC_UNCORE_DEV(ADL_18),
	IMC_UNCORE_DEV(ADL_19),
	IMC_UNCORE_DEV(ADL_20),
	IMC_UNCORE_DEV(ADL_21),
	IMC_UNCORE_DEV(RPL_1),
	IMC_UNCORE_DEV(RPL_2),
	IMC_UNCORE_DEV(RPL_3),
	IMC_UNCORE_DEV(RPL_4),
	IMC_UNCORE_DEV(RPL_5),
	IMC_UNCORE_DEV(RPL_6),
	IMC_UNCORE_DEV(RPL_7),
	IMC_UNCORE_DEV(RPL_8),
	IMC_UNCORE_DEV(RPL_9),
	IMC_UNCORE_DEV(RPL_10),
	IMC_UNCORE_DEV(RPL_11),
	IMC_UNCORE_DEV(RPL_12),
	IMC_UNCORE_DEV(RPL_13),
	IMC_UNCORE_DEV(RPL_14),
	IMC_UNCORE_DEV(RPL_15),
	IMC_UNCORE_DEV(RPL_16),
	IMC_UNCORE_DEV(RPL_17),
	IMC_UNCORE_DEV(RPL_18),
	IMC_UNCORE_DEV(RPL_19),
	IMC_UNCORE_DEV(RPL_20),
	IMC_UNCORE_DEV(RPL_21),
	IMC_UNCORE_DEV(RPL_22),
	IMC_UNCORE_DEV(RPL_23),
	IMC_UNCORE_DEV(RPL_24),
	IMC_UNCORE_DEV(RPL_25),
	IMC_UNCORE_DEV(MTL_1),
	IMC_UNCORE_DEV(MTL_2),
	IMC_UNCORE_DEV(MTL_3),
	IMC_UNCORE_DEV(MTL_4),
	IMC_UNCORE_DEV(MTL_5),
	IMC_UNCORE_DEV(MTL_6),
	IMC_UNCORE_DEV(MTL_7),
	IMC_UNCORE_DEV(MTL_8),
	IMC_UNCORE_DEV(MTL_9),
	IMC_UNCORE_DEV(MTL_10),
	IMC_UNCORE_DEV(MTL_11),
	IMC_UNCORE_DEV(MTL_12),
	IMC_UNCORE_DEV(MTL_13),
	{ /* end: all zeroes */ }
};
1430 | |
/* Index of each TGL IMC free-running counter type (total/read/write). */
enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};
1437 | |
1438 | static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = { |
1439 | [TGL_MMIO_UNCORE_IMC_DATA_TOTAL] = { 0x5040, 0x0, 0x0, 1, 64 }, |
1440 | [TGL_MMIO_UNCORE_IMC_DATA_READ] = { .counter_base: 0x5058, .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 64 }, |
1441 | [TGL_MMIO_UNCORE_IMC_DATA_WRITE] = { .counter_base: 0x50A0, .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 64 }, |
1442 | }; |
1443 | |
1444 | static struct freerunning_counters tgl_uncore_imc_freerunning[] = { |
1445 | [TGL_MMIO_UNCORE_IMC_DATA_TOTAL] = { .counter_base: 0xd840, .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 64 }, |
1446 | [TGL_MMIO_UNCORE_IMC_DATA_READ] = { .counter_base: 0xd858, .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 64 }, |
1447 | [TGL_MMIO_UNCORE_IMC_DATA_WRITE] = { .counter_base: 0xd8A0, .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 64 }, |
1448 | }; |
1449 | |
/*
 * TGL IMC event descriptors: total/read/write data counters with a
 * 64-byte-per-count scale expressed in MiB (64 / 2^20 = 6.103515625e-5).
 */
static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10" ),
	INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5" ),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB" ),

	INTEL_UNCORE_EVENT_DESC(data_read,          "event=0xff,umask=0x20" ),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,    "6.103515625e-5" ),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,     "MiB" ),

	INTEL_UNCORE_EVENT_DESC(data_write,         "event=0xff,umask=0x30" ),
	INTEL_UNCORE_EVENT_DESC(data_write.scale,   "6.103515625e-5" ),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,    "MiB" ),

	{ /* end: all zeroes */ }
};
1465 | |
1466 | static struct pci_dev *tgl_uncore_get_mc_dev(void) |
1467 | { |
1468 | const struct pci_device_id *ids = tgl_uncore_pci_ids; |
1469 | struct pci_dev *mc_dev = NULL; |
1470 | |
1471 | while (ids && ids->vendor) { |
1472 | mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device: ids->device, NULL); |
1473 | if (mc_dev) |
1474 | return mc_dev; |
1475 | ids++; |
1476 | } |
1477 | |
1478 | return mc_dev; |
1479 | } |
1480 | |
/* Spacing between per-MC MMIO regions off MCHBAR, and the mapped window size. */
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000
1483 | |
1484 | static void __uncore_imc_init_box(struct intel_uncore_box *box, |
1485 | unsigned int base_offset) |
1486 | { |
1487 | struct pci_dev *pdev = tgl_uncore_get_mc_dev(); |
1488 | struct intel_uncore_pmu *pmu = box->pmu; |
1489 | struct intel_uncore_type *type = pmu->type; |
1490 | resource_size_t addr; |
1491 | u32 mch_bar; |
1492 | |
1493 | if (!pdev) { |
1494 | pr_warn("perf uncore: Cannot find matched IMC device.\n" ); |
1495 | return; |
1496 | } |
1497 | |
1498 | pci_read_config_dword(dev: pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, val: &mch_bar); |
1499 | /* MCHBAR is disabled */ |
1500 | if (!(mch_bar & BIT(0))) { |
1501 | pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n" ); |
1502 | pci_dev_put(dev: pdev); |
1503 | return; |
1504 | } |
1505 | mch_bar &= ~BIT(0); |
1506 | addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx); |
1507 | |
1508 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
1509 | pci_read_config_dword(dev: pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, val: &mch_bar); |
1510 | addr |= ((resource_size_t)mch_bar << 32); |
1511 | #endif |
1512 | |
1513 | addr += base_offset; |
1514 | box->io_addr = ioremap(offset: addr, size: type->mmio_map_size); |
1515 | if (!box->io_addr) |
1516 | pr_warn("perf uncore: Failed to ioremap for %s.\n" , type->name); |
1517 | |
1518 | pci_dev_put(dev: pdev); |
1519 | } |
1520 | |
/*
 * TGL free-running counters sit at the start of the per-MC region, so
 * map with no extra base offset.
 *
 * Fix: stripped baked-in inlay-hint text ("base_offset: 0") — not valid C.
 */
static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}
1525 | |
/* Free-running counters need only map/unmap/read; no enable/disable ops. */
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* sysfs "format" fields for the TGL IMC free-running PMU. */
static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format" ,
	.attrs = tgl_uncore_imc_formats_attr,
};
1543 | |
/*
 * TGL IMC free-running uncore type: two boxes (one per MC), three
 * 64-bit counters each. .freerunning defaults to the non-L table and is
 * swapped in tgl_l_uncore_mmio_init() for TGL-L.
 */
static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running" ,
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

/* NULL-terminated list of TGL MMIO uncore types. */
static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};
1560 | |
/* TGL-L variant: same uncore types but with the TGL-L counter offsets. */
void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* Register the TGL MMIO uncores with the core uncore code. */
void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}
1571 | |
1572 | /* end of Tiger Lake MMIO uncore support */ |
1573 | |
1574 | /* Alder Lake MMIO uncore support */ |
/* ADL IMC register block: offsets within the per-MC MMIO region. */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

/* Box control bits: freeze, reset control regs, reset counters. */
#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)
1589 | |
1590 | static void adl_uncore_imc_init_box(struct intel_uncore_box *box) |
1591 | { |
1592 | __uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE); |
1593 | |
1594 | /* The global control in MC1 can control both MCs. */ |
1595 | if (box->io_addr && (box->pmu->pmu_idx == 1)) |
1596 | writel(ADL_UNCORE_IMC_CTL_INT, addr: box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL); |
1597 | } |
1598 | |
1599 | static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box) |
1600 | { |
1601 | if (!box->io_addr) |
1602 | return; |
1603 | |
1604 | writel(ADL_UNCORE_IMC_CTL_FRZ, addr: box->io_addr + uncore_mmio_box_ctl(box)); |
1605 | } |
1606 | |
1607 | static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box) |
1608 | { |
1609 | if (!box->io_addr) |
1610 | return; |
1611 | |
1612 | writel(val: 0, addr: box->io_addr + uncore_mmio_box_ctl(box)); |
1613 | } |
1614 | |
/* MMIO ops for the programmable ADL IMC counters (generic event en/dis). */
static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= adl_uncore_mmio_disable_box,
	.enable_box	= adl_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* Valid config bits for ADL IMC events: event select, channel mask, edge. */
#define ADL_UNC_CTL_CHMASK_MASK			0x00000f00
#define ADL_UNC_IMC_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 ADL_UNC_CTL_CHMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET)
1629 | |
/* sysfs "format" fields for the programmable ADL IMC PMU. */
static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name		= "format" ,
	.attrs		= adl_uncore_imc_formats_attr,
};
1641 | |
/* Programmable ADL IMC uncore: two boxes, five 64-bit counters each. */
static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc" ,
	.num_counters	= 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};
1656 | |
/* Index of each ADL IMC free-running counter type (total/read/write). */
enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};
1663 | |
1664 | static struct freerunning_counters adl_uncore_imc_freerunning[] = { |
1665 | [ADL_MMIO_UNCORE_IMC_DATA_TOTAL] = { 0x40, 0x0, 0x0, 1, 64 }, |
1666 | [ADL_MMIO_UNCORE_IMC_DATA_READ] = { .counter_base: 0x58, .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 64 }, |
1667 | [ADL_MMIO_UNCORE_IMC_DATA_WRITE] = { .counter_base: 0xA0, .counter_offset: 0x0, .box_offset: 0x0, .num_counters: 1, .bits: 64 }, |
1668 | }; |
1669 | |
1670 | static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) |
1671 | { |
1672 | __uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE); |
1673 | } |
1674 | |
/* Free-running ADL counters: map/unmap/read only, no enable/disable. */
static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* ADL free-running IMC type; reuses the TGL event descs and format group. */
static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running" ,
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};
1693 | |
/* NULL-terminated list of ADL MMIO uncore types (programmable + free-running). */
static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL
};

/* Register the ADL MMIO uncores with the core uncore code. */
void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}
1704 | |
1705 | /* end of Alder Lake MMIO uncore support */ |
1706 | |