/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "uncore.h"
#include "uncore_discovery.h"

static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

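/*
 * Check whether a dedicated discovery table device is present, i.e. an
 * Intel PCI device that carries the PMON discovery extended capability.
 */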
static bool has_generic_discovery_table(void)
{
	struct pci_dev *dev;
	int dvsec;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
	if (!dev)
		return false;

	/* A discovery table device has the unique capability ID. */
	dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
	pci_dev_put(dev);
	if (dvsec)
		return true;

	return false;
}

static int logical_die_id;

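/*
 * Map a discovery table device to its logical die ID. Prefer the NUMA
 * node of the device's bus; fall back to a simple counter when NUMA
 * information is unavailable.
 */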
static int get_device_die_id(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);

	/*
	 * If the NUMA info is not available, assume that the logical die IDs
	 * are assigned contiguously, in the order in which the discovery
	 * table devices are detected.
	 */
	if (node < 0)
		return logical_die_id++;

	return uncore_device_to_die(dev);
}

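/* The discovered types are kept in an rb-tree ordered by box type ID. */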
#define __node_2_type(cur)	\
	rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
	struct intel_uncore_discovery_type *type_b = __node_2_type(b);
	const u16 *type_id = key;

	if (type_b->type > *type_id)
		return -1;
	else if (type_b->type < *type_id)
		return 1;

	return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

	return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
	return (__node_2_type(a)->type < __node_2_type(b)->type);
}

static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	if (unit->access_type >= UNCORE_ACCESS_MAX) {
		pr_warn("Unsupported access type %d\n", unit->access_type);
		return NULL;
	}

	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
	if (!type)
		return NULL;

	type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!type->box_ctrl_die)
		goto free_type;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
	type->type = unit->box_type;

	rb_add(&type->node, &discovery_tables, __type_less);

	return type;

free_type:
	kfree(type);

	return NULL;
}

static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	type = search_uncore_discovery_type(unit->box_type);
	if (type)
		return type;

	return add_uncore_discovery_type(unit);
}

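/*
 * Record a discovered unit. The first table parsed builds the per-type
 * metadata and the box ID/offset arrays; tables parsed on subsequent
 * dies only contribute the first box control address of each die.
 */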
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die, bool parsed)
{
	struct intel_uncore_discovery_type *type;
	unsigned int *ids;
	u64 *box_offset;
	int i;

	if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
		pr_info("Invalid address detected for uncore type %d box %d, disabling the unit.\n",
			unit->box_type, unit->box_id);
		return;
	}

	if (parsed) {
		type = search_uncore_discovery_type(unit->box_type);
		if (!type) {
			pr_info("Spurious uncore type %d detected, disabling the type.\n",
				unit->box_type);
			return;
		}
		/* Store the first box of each die */
		if (!type->box_ctrl_die[die])
			type->box_ctrl_die[die] = unit->ctl;
		return;
	}

	type = get_uncore_discovery_type(unit);
	if (!type)
		return;

	box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL);
	if (!box_offset)
		return;

	ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!ids)
		goto free_box_offset;

	/* Store generic information for the first box */
	if (!type->num_boxes) {
		type->box_ctrl = unit->ctl;
		type->box_ctrl_die[die] = unit->ctl;
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
		*ids = unit->box_id;
		goto end;
	}

	for (i = 0; i < type->num_boxes; i++) {
		ids[i] = type->ids[i];
		box_offset[i] = type->box_offset[i];

		if (unit->box_id == ids[i]) {
			pr_info("Duplicate uncore type %d box ID %d detected, dropping the duplicate unit.\n",
				unit->box_type, unit->box_id);
			goto free_ids;
		}
	}
	ids[i] = unit->box_id;
	box_offset[i] = unit->ctl - type->box_ctrl;
	kfree(type->ids);
	kfree(type->box_offset);
end:
	type->ids = ids;
	type->box_offset = box_offset;
	type->num_boxes++;
	return;

free_ids:
	kfree(ids);

free_box_offset:
	kfree(box_offset);
}

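/* Check the unit's box type against the UNCORE_IGNORE_END-terminated list. */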
static bool
uncore_ignore_unit(struct uncore_unit_discovery *unit, int *ignore)
{
	int i;

	if (!ignore)
		return false;

	for (i = 0; ignore[i] != UNCORE_IGNORE_END; i++) {
		if (unit->box_type == ignore[i])
			return true;
	}

	return false;
}

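/*
 * Map the discovery table behind the BAR at @bar_offset and walk it.
 * The global state sits at the start of the mapping; unit entries
 * follow at multiples of (global.stride * 8) bytes.
 */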
static int parse_discovery_table(struct pci_dev *dev, int die,
				 u32 bar_offset, bool *parsed,
				 int *ignore)
{
	struct uncore_global_discovery global;
	struct uncore_unit_discovery unit;
	void __iomem *io_addr;
	resource_size_t addr;
	unsigned long size;
	u32 val;
	int i;

	pci_read_config_dword(dev, bar_offset, &val);

	if (val & ~PCI_BASE_ADDRESS_MEM_MASK & ~PCI_BASE_ADDRESS_MEM_TYPE_64)
		return -EINVAL;

	addr = (resource_size_t)(val & PCI_BASE_ADDRESS_MEM_MASK);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		u32 val2;

		pci_read_config_dword(dev, bar_offset + 4, &val2);
		addr |= ((resource_size_t)val2) << 32;
	}
#endif
	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Read the Global Discovery State */
	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
	if (uncore_discovery_invalid_unit(global)) {
		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
			global.table1, global.ctl, global.table3);
		iounmap(io_addr);
		return -EINVAL;
	}
	iounmap(io_addr);

	size = (1 + global.max_units) * global.stride * 8;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Parse the Unit Discovery State entries */
	for (i = 0; i < global.max_units; i++) {
		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
			      sizeof(struct uncore_unit_discovery));

		if (uncore_discovery_invalid_unit(unit))
			continue;

		if (unit.access_type >= UNCORE_ACCESS_MAX)
			continue;

		if (uncore_ignore_unit(&unit, ignore))
			continue;

		uncore_insert_box_info(&unit, die, *parsed);
	}

	*parsed = true;
	iounmap(io_addr);
	return 0;
}

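/*
 * Scan Intel PCI devices (or only the dedicated discovery table device,
 * when one exists) for PMON discovery DVSEC entries and parse every
 * table found. Returns false if no table could be parsed.
 */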
bool intel_uncore_has_discovery_tables(int *ignore)
{
	u32 device, val, entry_id, bar_offset;
	int die, dvsec = 0;
	struct pci_dev *dev = NULL;
	bool parsed = false;
	bool ret = true;

	if (has_generic_discovery_table())
		device = UNCORE_DISCOVERY_TABLE_DEVICE;
	else
		device = PCI_ANY_ID;

	/*
	 * Start a new search and iterate through the list of
	 * the discovery table devices.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
				continue;

			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
				ret = false;
				goto err;
			}
			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

			die = get_device_die_id(dev);
			if (die < 0)
				continue;

			parse_discovery_table(dev, die, bar_offset, &parsed, ignore);
		}
	}

	/* None of the discovery tables are available */
	if (!parsed)
		ret = false;
err:
	pci_dev_put(dev);

	return ret;
}

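/* Free every discovery type along with its per-die box control array. */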
void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		kfree(type->box_ctrl_die);
		kfree(type);
	}
}

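/* sysfs format attributes for the generic PMON event encoding. */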
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");

static struct attribute *generic_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh.attr,
	NULL,
};

static const struct attribute_group generic_uncore_format_group = {
	.name = "format",
	.attrs = generic_uncore_formats_attr,
};

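/*
 * Generic MSR ops: the box is initialized, frozen and unfrozen through
 * its box control MSR; events are programmed via their control MSRs.
 */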
void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
						  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
						   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, 0);
}

static struct intel_uncore_ops generic_uncore_msr_ops = {
	.init_box = intel_generic_uncore_msr_init_box,
	.disable_box = intel_generic_uncore_msr_disable_box,
	.enable_box = intel_generic_uncore_msr_enable_box,
	.disable_event = intel_generic_uncore_msr_disable_event,
	.enable_event = intel_generic_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

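/*
 * Generic PCI ops: box and event control registers live in the PCI
 * config space of the box's device.
 */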
void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
						  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, 0);
}

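/*
 * PCI config reads are 32 bits wide, so the 64-bit counter is read as
 * two dwords: the low half first, then the high half one dword above.
 */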
u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops generic_uncore_pci_ops = {
	.init_box = intel_generic_uncore_pci_init_box,
	.disable_box = intel_generic_uncore_pci_disable_box,
	.enable_box = intel_generic_uncore_pci_enable_box,
	.disable_event = intel_generic_uncore_pci_disable_event,
	.enable_event = intel_generic_uncore_pci_enable_event,
	.read_counter = intel_generic_uncore_pci_read_counter,
};

#define UNCORE_GENERIC_MMIO_SIZE	0x4000

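/*
 * The MMIO box control address is the per-die base recorded during
 * discovery plus this PMU's offset within the die.
 */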
static u64 generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;

	if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
		return 0;

	return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
}

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	u64 box_ctl = generic_uncore_mmio_box_ctl(box);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;

	if (!box_ctl) {
		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx]);
		return;
	}

	addr = box_ctl;
	box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
	if (!box->io_addr) {
		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx],
			(unsigned long long)addr);
		return;
	}

	writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr);
}

void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
	.init_box = intel_generic_uncore_mmio_init_box,
	.exit_box = uncore_mmio_exit_box,
	.disable_box = intel_generic_uncore_mmio_disable_box,
	.enable_box = intel_generic_uncore_mmio_enable_box,
	.disable_event = intel_generic_uncore_mmio_disable_event,
	.enable_event = intel_generic_uncore_mmio_enable_event,
	.read_counter = uncore_mmio_read_counter,
};

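/*
 * Fill a generic intel_uncore_type from the discovered data. The
 * register addresses are encoded per access method: absolute MSR
 * numbers, offsets within PCI config space, or offsets into the MMIO
 * mapping.
 */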
static bool uncore_update_uncore_type(enum uncore_access_type type_id,
				      struct intel_uncore_type *uncore,
				      struct intel_uncore_discovery_type *type)
{
	uncore->type_id = type->type;
	uncore->num_boxes = type->num_boxes;
	uncore->num_counters = type->num_counters;
	uncore->perf_ctr_bits = type->counter_width;
	uncore->box_ids = type->ids;

	switch (type_id) {
	case UNCORE_ACCESS_MSR:
		uncore->ops = &generic_uncore_msr_ops;
		uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->msr_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_PCI:
		uncore->ops = &generic_uncore_pci_ops;
		uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
		uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
		uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
		uncore->box_ctls = type->box_ctrl_die;
		uncore->pci_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_MMIO:
		uncore->ops = &generic_uncore_mmio_ops;
		uncore->perf_ctr = (unsigned int)type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->box_ctls = type->box_ctrl_die;
		uncore->mmio_offsets = type->box_offset;
		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
		break;
	default:
		return false;
	}

	return true;
}

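/*
 * Build a NULL-terminated array of intel_uncore_type for all discovered
 * types with the given access method. @num_extra reserves additional,
 * initially unused slots at the end of the array for the caller.
 */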
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
{
	struct intel_uncore_discovery_type *type;
	struct intel_uncore_type **uncores;
	struct intel_uncore_type *uncore;
	struct rb_node *node;
	int i = 0;

	uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
			  sizeof(struct intel_uncore_type *), GFP_KERNEL);
	if (!uncores)
		return empty_uncore;

	for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
		type = rb_entry(node, struct intel_uncore_discovery_type, node);
		if (type->access_type != type_id)
			continue;

		uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
		if (!uncore)
			break;

		uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
		uncore->format_group = &generic_uncore_format_group;

		if (!uncore_update_uncore_type(type_id, uncore, type)) {
			kfree(uncore);
			continue;
		}
		uncores[i++] = uncore;
	}

	return uncores;
}

void intel_uncore_generic_uncore_cpu_init(void)
{
	uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
}

int intel_uncore_generic_uncore_pci_init(void)
{
	uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);

	return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
	uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
}