// SPDX-License-Identifier: GPL-2.0
/*
 * ARM CoreSight Architecture PMU driver.
 *
 * This driver adds support for uncore PMUs based on the ARM CoreSight
 * Performance Monitoring Unit Architecture. The PMU is accessible via MMIO
 * registers and, like other uncore PMUs, it does not support process-specific
 * events and cannot be used in sampling mode.
 *
 * This code is based on other uncore PMUs like the ARM DSU PMU. It provides a
 * generic implementation to operate the PMU according to the CoreSight PMU
 * architecture and ACPI ARM PMU table (APMT) documents below:
 * - ARM CoreSight PMU architecture document number: ARM IHI 0091 A.a-00bet0.
 * - APMT document number: ARM DEN0117.
 *
 * The user should refer to the vendor technical documentation for details
 * about the supported events.
 *
 * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

#include "arm_cspmu.h"

#define PMUNAME "arm_cspmu"
#define DRVNAME "arm-cs-arch-pmu"

#define ARM_CSPMU_CPUMASK_ATTR(_name, _config)			\
	ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show,	\
			   (unsigned long)_config)

/*
 * CoreSight PMU Arch register offsets.
 */
#define PMEVCNTR_LO	0x0
#define PMEVCNTR_HI	0x4
#define PMEVTYPER	0x400
#define PMCCFILTR	0x47C
#define PMEVFILTR	0xA00
#define PMCNTENSET	0xC00
#define PMCNTENCLR	0xC20
#define PMINTENSET	0xC40
#define PMINTENCLR	0xC60
#define PMOVSCLR	0xC80
#define PMOVSSET	0xCC0
#define PMCFGR		0xE00
#define PMCR		0xE04
#define PMIIDR		0xE08

/* PMCFGR register field */
#define PMCFGR_NCG	GENMASK(31, 28)
#define PMCFGR_HDBG	BIT(24)
#define PMCFGR_TRO	BIT(23)
#define PMCFGR_SS	BIT(22)
#define PMCFGR_FZO	BIT(21)
#define PMCFGR_MSI	BIT(20)
#define PMCFGR_UEN	BIT(19)
#define PMCFGR_NA	BIT(17)
#define PMCFGR_EX	BIT(16)
#define PMCFGR_CCD	BIT(15)
#define PMCFGR_CC	BIT(14)
#define PMCFGR_SIZE	GENMASK(13, 8)
#define PMCFGR_N	GENMASK(7, 0)

/* PMCR register field */
#define PMCR_TRO	BIT(11)
#define PMCR_HDBG	BIT(10)
#define PMCR_FZO	BIT(9)
#define PMCR_NA		BIT(8)
#define PMCR_DP		BIT(5)
#define PMCR_X		BIT(4)
#define PMCR_D		BIT(3)
#define PMCR_C		BIT(2)
#define PMCR_P		BIT(1)
#define PMCR_E		BIT(0)

/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT		5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM		\
	(1 << ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register number. */
#define COUNTER_TO_SET_CLR_ID(idx)			\
	(idx >> ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register bit. */
#define COUNTER_TO_SET_CLR_BIT(idx)			\
	(idx & (ARM_CSPMU_SET_CLR_COUNTER_NUM - 1))
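
/*
 * For example, with the 32-counter SET/CLR granularity above, counter idx 37
 * maps to SET/CLR register 1, bit 5:
 *   COUNTER_TO_SET_CLR_ID(37)  == 37 >> 5 == 1
 *   COUNTER_TO_SET_CLR_BIT(37) == 37 & 31 == 5
 */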

#define ARM_CSPMU_ACTIVE_CPU_MASK		0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK		0x1

/*
 * Maximum poll count for reading counter value using high-low-high sequence.
 */
#define HILOHI_MAX_POLL	1000

static unsigned long arm_cspmu_cpuhp_state;

static DEFINE_MUTEX(arm_cspmu_lock);

static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
				    struct hw_perf_event *hwc, u32 filter);

static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
	struct acpi_apmt_node **ptr = dev_get_platdata(dev);

	return ptr ? *ptr : NULL;
}

/*
 * In the CoreSight PMU architecture, all of the MMIO registers are 32-bit
 * except the counter register. The counter register can be implemented as a
 * 32-bit or 64-bit register depending on the value of the PMCFGR.SIZE field.
 * For 64-bit access, single-copy 64-bit atomic support is implementation
 * defined. The APMT node flag is used to identify if the PMU supports 64-bit
 * single-copy atomic access. If it does not, the driver treats the register
 * as a pair of 32-bit registers.
 */

/*
 * Read a 64-bit register as a pair of 32-bit registers using a hi-lo-hi
 * sequence.
 */
static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
{
	u32 val_lo, val_hi;
	u64 val;

	/* Use high-low-high sequence to avoid tearing */
	do {
		if (max_poll_count-- == 0) {
			pr_err("ARM CSPMU: timeout hi-low-high sequence\n");
			return 0;
		}

		val_hi = readl(addr + 4);
		val_lo = readl(addr);
	} while (val_hi != readl(addr + 4));

	val = (((u64)val_hi << 32) | val_lo);

	return val;
}
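
/*
 * Illustration of the retry above: if the counter advances from
 * 0x1_FFFF_FFFF to 0x2_0000_0000 between the two 32-bit reads, the high
 * word read before (0x1) and after (0x2) the low word will differ, so the
 * loop retries until a consistent snapshot is observed or the poll budget
 * is exhausted.
 */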

/* Check if cycle counter is supported. */
static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
{
	return (cspmu->pmcfgr & PMCFGR_CC);
}

/* Get counter size, which is (PMCFGR_SIZE + 1). */
static inline u32 counter_size(const struct arm_cspmu *cspmu)
{
	return FIELD_GET(PMCFGR_SIZE, cspmu->pmcfgr) + 1;
}

/* Get counter mask. */
static inline u64 counter_mask(const struct arm_cspmu *cspmu)
{
	return GENMASK_ULL(counter_size(cspmu) - 1, 0);
}
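
/*
 * For example, a PMCFGR.SIZE field value of 31 encodes a 32-bit counter,
 * giving counter_size() == 32 and counter_mask() == GENMASK_ULL(31, 0),
 * i.e. 0xFFFFFFFF.
 */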

/* Check if counter is implemented as 64-bit register. */
static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
{
	return (counter_size(cspmu) > 32);
}

ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, typeof(*pmu_attr), attr);
	return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show);

/* Default event list. */
static struct attribute *arm_cspmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

static struct attribute **
arm_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_event_attrs,
			     sizeof(arm_cspmu_event_attrs), GFP_KERNEL);

	return attrs;
}

static umode_t
arm_cspmu_event_attr_is_visible(struct kobject *kobj,
				struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	/* Hide cycle event if not supported */
	if (!supports_cycle_counter(cspmu) &&
	    eattr->id == ARM_CSPMU_EVT_CYCLES_DEFAULT)
		return 0;

	return attr->mode;
}

ssize_t arm_cspmu_sysfs_format_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_format_show);

static struct attribute *arm_cspmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_FILTER_ATTR,
	NULL,
};

static struct attribute **
arm_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_format_attrs,
			     sizeof(arm_cspmu_format_attrs), GFP_KERNEL);

	return attrs;
}

static u32 arm_cspmu_event_type(const struct perf_event *event)
{
	return event->attr.config & ARM_CSPMU_EVENT_MASK;
}

static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
{
	return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}

static u32 arm_cspmu_event_filter(const struct perf_event *event)
{
	return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
}

static ssize_t arm_cspmu_identifier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "%s\n", cspmu->identifier);
}

static struct device_attribute arm_cspmu_identifier_attr =
	__ATTR(identifier, 0444, arm_cspmu_identifier_show, NULL);

static struct attribute *arm_cspmu_identifier_attrs[] = {
	&arm_cspmu_identifier_attr.attr,
	NULL,
};

static struct attribute_group arm_cspmu_identifier_attr_group = {
	.attrs = arm_cspmu_identifier_attrs,
};

static const char *arm_cspmu_get_identifier(const struct arm_cspmu *cspmu)
{
	const char *identifier =
		devm_kasprintf(cspmu->dev, GFP_KERNEL, "%x",
			       cspmu->impl.pmiidr);
	return identifier;
}

static const char *arm_cspmu_type_str[ACPI_APMT_NODE_TYPE_COUNT] = {
	"mc",
	"smmu",
	"pcie",
	"acpi",
	"cache",
};

static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct acpi_apmt_node *apmt_node;
	u8 pmu_type;
	char *name;
	char acpi_hid_string[ACPI_ID_LEN] = { 0 };
	static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 };

	dev = cspmu->dev;
	apmt_node = arm_cspmu_apmt_node(dev);
	if (!apmt_node)
		return devm_kasprintf(dev, GFP_KERNEL, PMUNAME "_%u",
				      atomic_fetch_inc(&pmu_idx[0]));

	pmu_type = apmt_node->type;

	if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
		dev_err(dev, "unsupported PMU type-%u\n", pmu_type);
		return NULL;
	}

	if (pmu_type == ACPI_APMT_NODE_TYPE_ACPI) {
		memcpy(acpi_hid_string,
		       &apmt_node->inst_primary,
		       sizeof(apmt_node->inst_primary));
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%s_%u", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      acpi_hid_string,
				      apmt_node->inst_secondary);
	} else {
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%d", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      atomic_fetch_inc(&pmu_idx[pmu_type]));
	}

	return name;
}

static ssize_t arm_cspmu_cpumask_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	unsigned long mask_id = (unsigned long)eattr->var;
	const cpumask_t *cpumask;

	switch (mask_id) {
	case ARM_CSPMU_ACTIVE_CPU_MASK:
		cpumask = &cspmu->active_cpu;
		break;
	case ARM_CSPMU_ASSOCIATED_CPU_MASK:
		cpumask = &cspmu->associated_cpus;
		break;
	default:
		return 0;
	}
	return cpumap_print_to_pagebuf(true, buf, cpumask);
}

static struct attribute *arm_cspmu_cpumask_attrs[] = {
	ARM_CSPMU_CPUMASK_ATTR(cpumask, ARM_CSPMU_ACTIVE_CPU_MASK),
	ARM_CSPMU_CPUMASK_ATTR(associated_cpus, ARM_CSPMU_ASSOCIATED_CPU_MASK),
	NULL,
};

static struct attribute_group arm_cspmu_cpumask_attr_group = {
	.attrs = arm_cspmu_cpumask_attrs,
};

static struct arm_cspmu_impl_match impl_match[] = {
	{
		.module_name = "nvidia_cspmu",
		.pmiidr_val = ARM_CSPMU_IMPL_ID_NVIDIA,
		.pmiidr_mask = ARM_CSPMU_PMIIDR_IMPLEMENTER,
		.module = NULL,
		.impl_init_ops = NULL,
	},
	{
		.module_name = "ampere_cspmu",
		.pmiidr_val = ARM_CSPMU_IMPL_ID_AMPERE,
		.pmiidr_mask = ARM_CSPMU_PMIIDR_IMPLEMENTER,
		.module = NULL,
		.impl_init_ops = NULL,
	},

	{0}
};

static struct arm_cspmu_impl_match *arm_cspmu_impl_match_get(u32 pmiidr)
{
	struct arm_cspmu_impl_match *match = impl_match;

	for (; match->pmiidr_val; match++) {
		u32 mask = match->pmiidr_mask;

		if ((match->pmiidr_val & mask) == (pmiidr & mask))
			return match;
	}

	return NULL;
}

#define DEFAULT_IMPL_OP(name)	.name = arm_cspmu_##name

static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
	int ret = 0;
	struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	struct arm_cspmu_impl_match *match;

	/* Start with a default PMU implementation */
	cspmu->impl.module = THIS_MODULE;
	cspmu->impl.pmiidr = readl(cspmu->base0 + PMIIDR);
	cspmu->impl.ops = (struct arm_cspmu_impl_ops) {
		DEFAULT_IMPL_OP(get_event_attrs),
		DEFAULT_IMPL_OP(get_format_attrs),
		DEFAULT_IMPL_OP(get_identifier),
		DEFAULT_IMPL_OP(get_name),
		DEFAULT_IMPL_OP(is_cycle_counter_event),
		DEFAULT_IMPL_OP(event_type),
		DEFAULT_IMPL_OP(event_filter),
		DEFAULT_IMPL_OP(set_ev_filter),
		DEFAULT_IMPL_OP(event_attr_is_visible),
	};

	/* Firmware may override implementer/product ID from PMIIDR */
	if (apmt_node && apmt_node->impl_id)
		cspmu->impl.pmiidr = apmt_node->impl_id;

	/* Find implementer specific attribute ops. */
	match = arm_cspmu_impl_match_get(cspmu->impl.pmiidr);

	/* Load implementer module and initialize the callbacks. */
	if (match) {
		mutex_lock(&arm_cspmu_lock);

		if (match->impl_init_ops) {
			/* Prevent unload until PMU registration is done. */
			if (try_module_get(match->module)) {
				cspmu->impl.module = match->module;
				cspmu->impl.match = match;
				ret = match->impl_init_ops(cspmu);
				if (ret)
					module_put(match->module);
			} else {
				WARN(1, "arm_cspmu failed to get module: %s\n",
				     match->module_name);
				ret = -EINVAL;
			}
		} else {
			request_module_nowait(match->module_name);
			ret = -EPROBE_DEFER;
		}

		mutex_unlock(&arm_cspmu_lock);
	}

	return ret;
}

static struct attribute_group *
arm_cspmu_alloc_event_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *event_group;
	struct device *dev = cspmu->dev;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;

	event_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!event_group)
		return NULL;

	event_group->name = "events";
	event_group->is_visible = impl_ops->event_attr_is_visible;
	event_group->attrs = impl_ops->get_event_attrs(cspmu);

	if (!event_group->attrs)
		return NULL;

	return event_group;
}

static struct attribute_group *
arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *format_group;
	struct device *dev = cspmu->dev;

	format_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!format_group)
		return NULL;

	format_group->name = "format";
	format_group->attrs = cspmu->impl.ops.get_format_attrs(cspmu);

	if (!format_group->attrs)
		return NULL;

	return format_group;
}

static int arm_cspmu_alloc_attr_groups(struct arm_cspmu *cspmu)
{
	const struct attribute_group **attr_groups = cspmu->attr_groups;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;

	cspmu->identifier = impl_ops->get_identifier(cspmu);
	cspmu->name = impl_ops->get_name(cspmu);

	if (!cspmu->identifier || !cspmu->name)
		return -ENOMEM;

	attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
	attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
	attr_groups[2] = &arm_cspmu_identifier_attr_group;
	attr_groups[3] = &arm_cspmu_cpumask_attr_group;

	if (!attr_groups[0] || !attr_groups[1])
		return -ENOMEM;

	return 0;
}

static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
	writel(PMCR_C | PMCR_P, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
{
	writel(PMCR_E, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_stop_counters(struct arm_cspmu *cspmu)
{
	writel(0, cspmu->base0 + PMCR);
}

static void arm_cspmu_enable(struct pmu *pmu)
{
	bool disabled;
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	disabled = bitmap_empty(cspmu->hw_events.used_ctrs,
				cspmu->num_logical_ctrs);

	if (disabled)
		return;

	arm_cspmu_start_counters(cspmu);
}

static void arm_cspmu_disable(struct pmu *pmu)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	arm_cspmu_stop_counters(cspmu);
}

static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
				   struct perf_event *event)
{
	int idx, ret;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (supports_cycle_counter(cspmu)) {
		if (cspmu->impl.ops.is_cycle_counter_event(event)) {
			/* Search for available cycle counter. */
			if (test_and_set_bit(cspmu->cycle_counter_logical_idx,
					     hw_events->used_ctrs))
				return -EAGAIN;

			return cspmu->cycle_counter_logical_idx;
		}

		/*
		 * Search for a regular counter in the used counter bitmap.
		 * The cycle counter divides the bitmap into two parts. Search
		 * the first, then the second half, to exclude the cycle
		 * counter bit.
		 */
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->cycle_counter_logical_idx);
		if (idx >= cspmu->cycle_counter_logical_idx) {
			idx = find_next_zero_bit(
				hw_events->used_ctrs,
				cspmu->num_logical_ctrs,
				cspmu->cycle_counter_logical_idx + 1);
		}
	} else {
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->num_logical_ctrs);
	}

	if (idx >= cspmu->num_logical_ctrs)
		return -EAGAIN;

	if (cspmu->impl.ops.validate_event) {
		ret = cspmu->impl.ops.validate_event(cspmu, event);
		if (ret)
			return ret;
	}

	set_bit(idx, hw_events->used_ctrs);

	return idx;
}

static bool arm_cspmu_validate_event(struct pmu *pmu,
				     struct arm_cspmu_hw_events *hw_events,
				     struct perf_event *event)
{
	if (is_software_event(event))
		return true;

	/* Reject groups spanning multiple HW PMUs. */
	if (event->pmu != pmu)
		return false;

	return (arm_cspmu_get_event_idx(hw_events, event) >= 0);
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool arm_cspmu_validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct arm_cspmu_hw_events fake_hw_events;

	if (event->group_leader == event)
		return true;

	memset(&fake_hw_events, 0, sizeof(fake_hw_events));

	if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events, leader))
		return false;

	for_each_sibling_event(sibling, leader) {
		if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events,
					      sibling))
			return false;
	}

	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
}

static int arm_cspmu_event_init(struct perf_event *event)
{
	struct arm_cspmu *cspmu;
	struct hw_perf_event *hwc = &event->hw;

	cspmu = to_arm_cspmu(event->pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * Following other "uncore" PMUs, we do not support sampling mode or
	 * attaching to a task (per-process mode).
	 */
	if (is_sampling_event(event)) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support sampling events\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support per-task counters\n");
		return -EINVAL;
	}

	/*
	 * Make sure the CPU assignment is on one of the CPUs associated with
	 * this PMU.
	 */
	if (!cpumask_test_cpu(event->cpu, &cspmu->associated_cpus)) {
		dev_dbg(cspmu->pmu.dev,
			"Requested cpu is not associated with the PMU\n");
		return -EINVAL;
	}

	/* Enforce the current active CPU to handle the events in this PMU. */
	event->cpu = cpumask_first(&cspmu->active_cpu);
	if (event->cpu >= nr_cpu_ids)
		return -EINVAL;

	if (!arm_cspmu_validate_group(event))
		return -EINVAL;

	/*
	 * The logical counter id is tracked with hw_perf_event.extra_reg.idx.
	 * The physical counter id is tracked with hw_perf_event.idx.
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->extra_reg.idx = -1;
	hwc->config = cspmu->impl.ops.event_type(event);

	return 0;
}

static inline u32 counter_offset(u32 reg_sz, u32 ctr_idx)
{
	return (PMEVCNTR_LO + (reg_sz * ctr_idx));
}

static void arm_cspmu_write_counter(struct perf_event *event, u64 val)
{
	u32 offset;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);

		if (cspmu->has_atomic_dword)
			writeq(val, cspmu->base1 + offset);
		else
			lo_hi_writeq(val, cspmu->base1 + offset);
	} else {
		offset = counter_offset(sizeof(u32), event->hw.idx);

		writel(lower_32_bits(val), cspmu->base1 + offset);
	}
}

static u64 arm_cspmu_read_counter(struct perf_event *event)
{
	u32 offset;
	const void __iomem *counter_addr;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);
		counter_addr = cspmu->base1 + offset;

		return cspmu->has_atomic_dword ?
			readq(counter_addr) :
			read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL);
	}

	offset = counter_offset(sizeof(u32), event->hw.idx);
	return readl(cspmu->base1 + offset);
}

/*
 * arm_cspmu_set_event_period: Set the period for the counter.
 *
 * To handle cases of extreme interrupt latency, we program
 * the counter with half of the max count for the counters.
 */
static void arm_cspmu_set_event_period(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	u64 val = counter_mask(cspmu) >> 1ULL;

	local64_set(&event->hw.prev_count, val);
	arm_cspmu_write_counter(event, val);
}
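
/*
 * Worked example: for a 32-bit counter, counter_mask() >> 1 == 0x7FFFFFFF,
 * so the counter starts at half of its range and has 2^31 + 1 increments of
 * headroom before the next overflow. This tolerates long interrupt latency
 * without losing a full counter period.
 */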

static void arm_cspmu_enable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENSET + (4 * reg_id);
	cnten_off = PMCNTENSET + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + inten_off);
	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
}

static void arm_cspmu_disable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENCLR + (4 * reg_id);
	cnten_off = PMCNTENCLR + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
	writel(BIT(reg_bit), cspmu->base0 + inten_off);
}

static void arm_cspmu_event_update(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev, now;

	do {
		prev = local64_read(&hwc->prev_count);
		now = arm_cspmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	delta = (now - prev) & counter_mask(cspmu);
	local64_add(delta, &event->count);
}
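
/*
 * The mask above handles counter wraparound: e.g. on a 32-bit counter,
 * prev == 0xFFFFFFF0 and now == 0x10 gives (now - prev) ==
 * 0xFFFFFFFF00000020 in 64-bit arithmetic, and masking with counter_mask()
 * yields the correct delta of 0x20.
 */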

static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
				       struct hw_perf_event *hwc)
{
	u32 offset = PMEVTYPER + (4 * hwc->idx);

	writel(hwc->config, cspmu->base0 + offset);
}

static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
				    struct hw_perf_event *hwc,
				    u32 filter)
{
	u32 offset = PMEVFILTR + (4 * hwc->idx);

	writel(filter, cspmu->base0 + offset);
}

static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
{
	u32 offset = PMCCFILTR;

	writel(filter, cspmu->base0 + offset);
}

static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u32 filter;

	/* We always reprogram the counter */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON(!(hwc->state & PERF_HES_UPTODATE));

	arm_cspmu_set_event_period(event);

	filter = cspmu->impl.ops.event_filter(event);

	if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
		arm_cspmu_set_cc_filter(cspmu, filter);
	} else {
		arm_cspmu_set_event(cspmu, hwc);
		cspmu->impl.ops.set_ev_filter(cspmu, hwc, filter);
	}

	hwc->state = 0;

	arm_cspmu_enable_counter(cspmu, hwc->idx);
}

static void arm_cspmu_stop(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	arm_cspmu_disable_counter(cspmu, hwc->idx);
	arm_cspmu_event_update(event);

	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static inline u32 to_phys_idx(struct arm_cspmu *cspmu, u32 idx)
{
	return (idx == cspmu->cycle_counter_logical_idx) ?
		ARM_CSPMU_CYCLE_CNTR_IDX : idx;
}

static int arm_cspmu_add(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
					   &cspmu->associated_cpus)))
		return -ENOENT;

	idx = arm_cspmu_get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	hw_events->events[idx] = event;
	hwc->idx = to_phys_idx(cspmu, idx);
	hwc->extra_reg.idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (flags & PERF_EF_START)
		arm_cspmu_start(event, PERF_EF_RELOAD);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void arm_cspmu_del(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->extra_reg.idx;

	arm_cspmu_stop(event, PERF_EF_UPDATE);

	hw_events->events[idx] = NULL;

	clear_bit(idx, hw_events->used_ctrs);

	perf_event_update_userpage(event);
}

static void arm_cspmu_read(struct perf_event *event)
{
	arm_cspmu_event_update(event);
}

static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
{
	struct acpi_apmt_node *apmt_node;
	struct arm_cspmu *cspmu;
	struct device *dev = &pdev->dev;

	cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL);
	if (!cspmu)
		return NULL;

	cspmu->dev = dev;
	platform_set_drvdata(pdev, cspmu);

	apmt_node = arm_cspmu_apmt_node(dev);
	if (apmt_node) {
		cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
	} else {
		u32 width = 0;

		device_property_read_u32(dev, "reg-io-width", &width);
		cspmu->has_atomic_dword = (width == 8);
	}

	return cspmu;
}

static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Base address for page 0. */
	cspmu->base0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cspmu->base0)) {
		dev_err(dev, "ioremap failed for page-0 resource\n");
		return PTR_ERR(cspmu->base0);
	}

	/* Base address for page 1 if supported. Otherwise point to page 0. */
	cspmu->base1 = cspmu->base0;
	if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) {
		cspmu->base1 = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(cspmu->base1)) {
			dev_err(dev, "ioremap failed for page-1 resource\n");
			return PTR_ERR(cspmu->base1);
		}
	}

	cspmu->pmcfgr = readl(cspmu->base0 + PMCFGR);

	cspmu->num_logical_ctrs = FIELD_GET(PMCFGR_N, cspmu->pmcfgr) + 1;

	cspmu->cycle_counter_logical_idx = ARM_CSPMU_MAX_HW_CNTRS;

	if (supports_cycle_counter(cspmu)) {
		/*
		 * The last logical counter is mapped to the cycle counter if
		 * there is a gap between the regular and cycle counters.
		 * Otherwise, logical and physical counters have a 1-to-1
		 * mapping.
		 */
		cspmu->cycle_counter_logical_idx =
			(cspmu->num_logical_ctrs <= ARM_CSPMU_CYCLE_CNTR_IDX) ?
				cspmu->num_logical_ctrs - 1 :
				ARM_CSPMU_CYCLE_CNTR_IDX;
	}
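
	/*
	 * Example of the mapping above, assuming ARM_CSPMU_CYCLE_CNTR_IDX
	 * (the fixed physical slot of the cycle counter in arm_cspmu.h) is
	 * 31: a PMU reporting 9 logical counters uses logical idx 8 (the
	 * last one) for the cycle counter, and to_phys_idx() translates that
	 * logical idx back to physical idx 31 when programming the hardware.
	 */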

	cspmu->num_set_clr_reg =
		DIV_ROUND_UP(cspmu->num_logical_ctrs,
			     ARM_CSPMU_SET_CLR_COUNTER_NUM);

	cspmu->hw_events.events =
		devm_kcalloc(dev, cspmu->num_logical_ctrs,
			     sizeof(*cspmu->hw_events.events), GFP_KERNEL);

	if (!cspmu->hw_events.events)
		return -ENOMEM;

	return 0;
}

static inline int arm_cspmu_get_reset_overflow(struct arm_cspmu *cspmu,
					       u32 *pmovs)
{
	int i;
	u32 pmovclr_offset = PMOVSCLR;
	u32 has_overflowed = 0;

	for (i = 0; i < cspmu->num_set_clr_reg; ++i) {
		pmovs[i] = readl(cspmu->base1 + pmovclr_offset);
		has_overflowed |= pmovs[i];
		/* Overflow bits are write-1-to-clear. */
		writel(pmovs[i], cspmu->base1 + pmovclr_offset);
		pmovclr_offset += sizeof(u32);
	}

	return has_overflowed != 0;
}

static irqreturn_t arm_cspmu_handle_irq(int irq_num, void *dev)
{
	int idx, has_overflowed;
	struct perf_event *event;
	struct arm_cspmu *cspmu = dev;
	DECLARE_BITMAP(pmovs, ARM_CSPMU_MAX_HW_CNTRS);
	bool handled = false;

	arm_cspmu_stop_counters(cspmu);

	has_overflowed = arm_cspmu_get_reset_overflow(cspmu, (u32 *)pmovs);
	if (!has_overflowed)
		goto done;

	for_each_set_bit(idx, cspmu->hw_events.used_ctrs,
			 cspmu->num_logical_ctrs) {
		event = cspmu->hw_events.events[idx];

		if (!event)
			continue;

		if (!test_bit(event->hw.idx, pmovs))
			continue;

		arm_cspmu_event_update(event);
		arm_cspmu_set_event_period(event);

		handled = true;
	}

done:
	arm_cspmu_start_counters(cspmu);
	return IRQ_RETVAL(handled);
}

static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
{
	int irq, ret;
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Skip IRQ request if the PMU does not support overflow interrupt. */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0)
		return irq == -ENXIO ? 0 : irq;

	ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq,
			       IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev),
			       cspmu);
	if (ret) {
		dev_err(dev, "Could not request IRQ %d\n", irq);
		return ret;
	}

	cspmu->irq = irq;

	return 0;
}

#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64)
#include <acpi/processor.h>

static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
	struct device *cpu_dev;
	struct acpi_device *acpi_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -ENODEV;

	acpi_dev = ACPI_COMPANION(cpu_dev);
	while (acpi_dev) {
		if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, container_uid))
			return 0;

		acpi_dev = acpi_dev_parent(acpi_dev);
	}

	return -ENODEV;
}

static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	struct acpi_apmt_node *apmt_node;
	int affinity_flag;
	int cpu;

	apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;

	if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
		for_each_possible_cpu(cpu) {
			if (apmt_node->proc_affinity ==
			    get_acpi_id_for_cpu(cpu)) {
				cpumask_set_cpu(cpu, &cspmu->associated_cpus);
				break;
			}
		}
	} else {
		for_each_possible_cpu(cpu) {
			if (arm_cspmu_find_cpu_container(
				    cpu, apmt_node->proc_affinity))
				continue;

			cpumask_set_cpu(cpu, &cspmu->associated_cpus);
		}
	}

	return 0;
}
#else
static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	return -ENODEV;
}
#endif

static int arm_cspmu_of_get_cpus(struct arm_cspmu *cspmu)
{
	struct of_phandle_iterator it;
	int ret, cpu;

	of_for_each_phandle(&it, ret, dev_of_node(cspmu->dev), "cpus", NULL, 0) {
		cpu = of_cpu_node_to_id(it.node);
		if (cpu < 0)
			continue;
		cpumask_set_cpu(cpu, &cspmu->associated_cpus);
	}
	return ret == -ENOENT ? 0 : ret;
}

static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
	int ret = 0;

	if (arm_cspmu_apmt_node(cspmu->dev))
		ret = arm_cspmu_acpi_get_cpus(cspmu);
	else if (device_property_present(cspmu->dev, "cpus"))
		ret = arm_cspmu_of_get_cpus(cspmu);
	else
		cpumask_copy(&cspmu->associated_cpus, cpu_possible_mask);

	if (!ret && cpumask_empty(&cspmu->associated_cpus)) {
		dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
		ret = -ENODEV;
	}
	return ret;
}

static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
	int ret, capabilities;

	ret = arm_cspmu_alloc_attr_groups(cspmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
				       &cspmu->cpuhp_node);
	if (ret)
		return ret;

	capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	if (cspmu->irq == 0)
		capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	cspmu->pmu = (struct pmu){
		.task_ctx_nr	= perf_invalid_context,
		.module		= cspmu->impl.module,
		.pmu_enable	= arm_cspmu_enable,
		.pmu_disable	= arm_cspmu_disable,
		.event_init	= arm_cspmu_event_init,
		.add		= arm_cspmu_add,
		.del		= arm_cspmu_del,
		.start		= arm_cspmu_start,
		.stop		= arm_cspmu_stop,
		.read		= arm_cspmu_read,
		.attr_groups	= cspmu->attr_groups,
		.capabilities	= capabilities,
	};

	/* Hardware counter init */
	arm_cspmu_reset_counters(cspmu);

	ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
	if (ret) {
		cpuhp_state_remove_instance(arm_cspmu_cpuhp_state,
					    &cspmu->cpuhp_node);
	}

	return ret;
}

static int arm_cspmu_device_probe(struct platform_device *pdev)
{
	int ret;
	struct arm_cspmu *cspmu;

	cspmu = arm_cspmu_alloc(pdev);
	if (!cspmu)
		return -ENOMEM;

	ret = arm_cspmu_init_mmio(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_request_irq(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_get_cpus(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_init_impl_ops(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_register_pmu(cspmu);

	/* Matches arm_cspmu_init_impl_ops() above. */
	if (cspmu->impl.module != THIS_MODULE)
		module_put(cspmu->impl.module);

	return ret;
}

static void arm_cspmu_device_remove(struct platform_device *pdev)
{
	struct arm_cspmu *cspmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&cspmu->pmu);
	cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);
}

static const struct platform_device_id arm_cspmu_id[] = {
	{DRVNAME, 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);

static const struct of_device_id arm_cspmu_of_match[] = {
	{ .compatible = "arm,coresight-pmu" },
	{}
};
MODULE_DEVICE_TABLE(of, arm_cspmu_of_match);

static struct platform_driver arm_cspmu_driver = {
	.driver = {
		.name = DRVNAME,
		.of_match_table = arm_cspmu_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = arm_cspmu_device_probe,
	.remove_new = arm_cspmu_device_remove,
	.id_table = arm_cspmu_id,
};

static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
{
	cpumask_set_cpu(cpu, &cspmu->active_cpu);
	if (cspmu->irq)
		WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
}

static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	if (!cpumask_test_cpu(cpu, &cspmu->associated_cpus))
		return 0;

	/* If the PMU is already managed, there is nothing to do */
	if (!cpumask_empty(&cspmu->active_cpu))
		return 0;

	/* Use this CPU for event counting */
	arm_cspmu_set_active_cpu(cpu, cspmu);

	return 0;
}

static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	int dst;
	struct cpumask online_supported;

	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	/* Nothing to do if this CPU doesn't own the PMU */
	if (!cpumask_test_and_clear_cpu(cpu, &cspmu->active_cpu))
		return 0;

	/* Choose a new CPU to migrate ownership of the PMU to */
	cpumask_and(&online_supported, &cspmu->associated_cpus,
		    cpu_online_mask);
	dst = cpumask_any_but(&online_supported, cpu);
	if (dst >= nr_cpu_ids)
		return 0;

	/* Use this CPU for event counting */
	perf_pmu_migrate_context(&cspmu->pmu, cpu, dst);
	arm_cspmu_set_active_cpu(dst, cspmu);

	return 0;
}

static int __init arm_cspmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/arm/cspmu:online",
				      arm_cspmu_cpu_online,
				      arm_cspmu_cpu_teardown);
	if (ret < 0)
		return ret;
	arm_cspmu_cpuhp_state = ret;
	return platform_driver_register(&arm_cspmu_driver);
}

static void __exit arm_cspmu_exit(void)
{
	platform_driver_unregister(&arm_cspmu_driver);
	cpuhp_remove_multi_state(arm_cspmu_cpuhp_state);
}

int arm_cspmu_impl_register(const struct arm_cspmu_impl_match *impl_match)
{
	struct arm_cspmu_impl_match *match;
	int ret = 0;

	match = arm_cspmu_impl_match_get(impl_match->pmiidr_val);

	if (match) {
		mutex_lock(&arm_cspmu_lock);

		if (!match->impl_init_ops) {
			match->module = impl_match->module;
			match->impl_init_ops = impl_match->impl_init_ops;
		} else {
			/* Broken match table may contain non-unique entries */
			WARN(1, "arm_cspmu backend already registered for module: %s, pmiidr: 0x%x, mask: 0x%x\n",
			     match->module_name,
			     match->pmiidr_val,
			     match->pmiidr_mask);

			ret = -EINVAL;
		}

		mutex_unlock(&arm_cspmu_lock);

		if (!ret)
			ret = driver_attach(&arm_cspmu_driver.driver);
	} else {
		pr_err("arm_cspmu reg failed, unable to find a match for pmiidr: 0x%x\n",
		       impl_match->pmiidr_val);

		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(arm_cspmu_impl_register);

static int arm_cspmu_match_device(struct device *dev, const void *match)
{
	struct arm_cspmu *cspmu = platform_get_drvdata(to_platform_device(dev));

	return (cspmu && cspmu->impl.match == match) ? 1 : 0;
}

void arm_cspmu_impl_unregister(const struct arm_cspmu_impl_match *impl_match)
{
	struct device *dev;
	struct arm_cspmu_impl_match *match;

	match = arm_cspmu_impl_match_get(impl_match->pmiidr_val);

	if (WARN_ON(!match))
		return;

	/* Unbind the driver from all matching backend devices. */
	while ((dev = driver_find_device(&arm_cspmu_driver.driver, NULL,
					 match, arm_cspmu_match_device)))
		device_release_driver(dev);

	mutex_lock(&arm_cspmu_lock);

	match->module = NULL;
	match->impl_init_ops = NULL;

	mutex_unlock(&arm_cspmu_lock);
}
EXPORT_SYMBOL_GPL(arm_cspmu_impl_unregister);

module_init(arm_cspmu_init);
module_exit(arm_cspmu_exit);

MODULE_LICENSE("GPL v2");
