// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM DynamIQ Shared Unit (DSU) PMU driver
 *
 * Copyright (C) ARM Limited, 2017.
 *
 * Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers.
 */

#define PMUNAME		"arm_dsu"
#define DRVNAME		PMUNAME "_pmu"
#define pr_fmt(fmt)	DRVNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <asm/arm_dsu_pmu.h>
#include <asm/local64.h>

/* PMU event codes */
#define DSU_PMU_EVT_CYCLES		0x11
#define DSU_PMU_EVT_CHAIN		0x1e

#define DSU_PMU_MAX_COMMON_EVENTS	0x40

#define DSU_PMU_MAX_HW_CNTRS		32
#define DSU_PMU_HW_COUNTER_MASK		(DSU_PMU_MAX_HW_CNTRS - 1)

#define CLUSTERPMCR_E			BIT(0)
#define CLUSTERPMCR_P			BIT(1)
#define CLUSTERPMCR_C			BIT(2)
#define CLUSTERPMCR_N_SHIFT		11
#define CLUSTERPMCR_N_MASK		0x1f
#define CLUSTERPMCR_IDCODE_SHIFT	16
#define CLUSTERPMCR_IDCODE_MASK		0xff
#define CLUSTERPMCR_IMP_SHIFT		24
#define CLUSTERPMCR_IMP_MASK		0xff
#define CLUSTERPMCR_RES_MASK		0x7e8
#define CLUSTERPMCR_RES_VAL		0x40

#define DSU_ACTIVE_CPU_MASK		0x0
#define DSU_ASSOCIATED_CPU_MASK		0x1

/*
 * We use the index of the counters as they appear in the counter
 * bit maps in the PMU registers (e.g. CLUSTERPMSELR).
 * i.e.,
 *	counter 0	- Bit 0
 *	counter 1	- Bit 1
 *	...
 *	Cycle counter	- Bit 31
 */
#define DSU_PMU_IDX_CYCLE_COUNTER	31

/* All event counters are 32-bit, with a 64-bit cycle counter */
#define DSU_PMU_COUNTER_WIDTH(idx)	\
	(((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32)

#define DSU_PMU_COUNTER_MASK(idx)	\
	GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0)

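/*
 * The attribute helpers below wrap a dev_ext_attribute in a compound
 * literal, so that each sysfs attribute carries its payload (an event
 * code, a format string or a cpumask selector) in ->var and can share
 * a single show() routine.
 */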
#define DSU_EXT_ATTR(_name, _func, _config)		\
	(&((struct dev_ext_attribute[]) {		\
		{					\
			.attr = __ATTR(_name, 0444, _func, NULL),	\
			.var = (void *)_config				\
		}					\
	})[0].attr.attr)

#define DSU_EVENT_ATTR(_name, _config)		\
	DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)

#define DSU_FORMAT_ATTR(_name, _config)		\
	DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)

#define DSU_CPUMASK_ATTR(_name, _config)	\
	DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)

struct dsu_hw_events {
	DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS);
	struct perf_event	*events[DSU_PMU_MAX_HW_CNTRS];
};

/*
 * struct dsu_pmu	- DSU PMU descriptor
 *
 * @pmu_lock		: Protects accesses to DSU PMU register from normal vs
 *			  interrupt handler contexts.
 * @hw_events		: Holds the event counter state.
 * @associated_cpus	: CPUs attached to the DSU.
 * @active_cpu		: CPU to which the PMU is bound for accesses.
 * @cpuhp_node		: Node for CPU hotplug notifier link.
 * @num_counters	: Number of event counters implemented by the PMU,
 *			  excluding the cycle counter.
 * @irq			: Interrupt line for counter overflow.
 * @cpmceid_bitmap	: Bitmap for the availability of architected common
 *			  events (event_code < 0x40).
 */
struct dsu_pmu {
	struct pmu			pmu;
	struct device			*dev;
	raw_spinlock_t			pmu_lock;
	struct dsu_hw_events		hw_events;
	cpumask_t			associated_cpus;
	cpumask_t			active_cpu;
	struct hlist_node		cpuhp_node;
	s8				num_counters;
	int				irq;
	DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS);
};

static unsigned long dsu_pmu_cpuhp_state;

static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
{
	return container_of(pmu, struct dsu_pmu, pmu);
}

static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "event=0x%lx\n", (unsigned long)eattr->var);
}

static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}

static ssize_t dsu_pmu_cpumask_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	unsigned long mask_id = (unsigned long)eattr->var;
	const cpumask_t *cpumask;

	switch (mask_id) {
	case DSU_ACTIVE_CPU_MASK:
		cpumask = &dsu_pmu->active_cpu;
		break;
	case DSU_ASSOCIATED_CPU_MASK:
		cpumask = &dsu_pmu->associated_cpus;
		break;
	default:
		return 0;
	}
	return cpumap_print_to_pagebuf(true, buf, cpumask);
}

static struct attribute *dsu_pmu_format_attrs[] = {
	DSU_FORMAT_ATTR(event, "config:0-31"),
	NULL,
};

static const struct attribute_group dsu_pmu_format_attr_group = {
	.name = "format",
	.attrs = dsu_pmu_format_attrs,
};

static struct attribute *dsu_pmu_event_attrs[] = {
	DSU_EVENT_ATTR(cycles, 0x11),
	DSU_EVENT_ATTR(bus_access, 0x19),
	DSU_EVENT_ATTR(memory_error, 0x1a),
	DSU_EVENT_ATTR(bus_cycles, 0x1d),
	DSU_EVENT_ATTR(l3d_cache_allocate, 0x29),
	DSU_EVENT_ATTR(l3d_cache_refill, 0x2a),
	DSU_EVENT_ATTR(l3d_cache, 0x2b),
	DSU_EVENT_ATTR(l3d_cache_wb, 0x2c),
	NULL,
};
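
/*
 * Illustrative usage (not part of the driver): assuming this PMU
 * instance registers as "arm_dsu_0", the events above can be counted
 * system-wide from userspace with something like:
 *
 *	perf stat -a -e arm_dsu_0/l3d_cache/,arm_dsu_0/l3d_cache_refill/ sleep 1
 *
 * Only the events advertised by the CLUSTERPMCEID registers are made
 * visible, see dsu_pmu_event_attr_is_visible() below.
 */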

static umode_t
dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,
			      int unused)
{
	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr.attr);
	unsigned long evt = (unsigned long)eattr->var;

	return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
}

static const struct attribute_group dsu_pmu_events_attr_group = {
	.name = "events",
	.attrs = dsu_pmu_event_attrs,
	.is_visible = dsu_pmu_event_attr_is_visible,
};

static struct attribute *dsu_pmu_cpumask_attrs[] = {
	DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK),
	DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK),
	NULL,
};

static const struct attribute_group dsu_pmu_cpumask_attr_group = {
	.attrs = dsu_pmu_cpumask_attrs,
};

static const struct attribute_group *dsu_pmu_attr_groups[] = {
	&dsu_pmu_cpumask_attr_group,
	&dsu_pmu_events_attr_group,
	&dsu_pmu_format_attr_group,
	NULL,
};

static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
{
	struct cpumask online_supported;

	cpumask_and(&online_supported,
			 &dsu_pmu->associated_cpus, cpu_online_mask);
	return cpumask_any_but(&online_supported, cpu);
}

static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
{
	return (idx < dsu_pmu->num_counters) ||
	       (idx == DSU_PMU_IDX_CYCLE_COUNTER);
}

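/*
 * Counter access helpers. The low-level accessors in
 * <asm/arm_dsu_pmu.h> typically select a counter (via CLUSTERPMSELR)
 * and then read or write it, so the select/access sequence must be
 * serialised against the interrupt handler; hence pmu_lock.
 */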
static inline u64 dsu_pmu_read_counter(struct perf_event *event)
{
	u64 val;
	unsigned long flags;
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
	int idx = event->hw.idx;

	if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
				 &dsu_pmu->associated_cpus)))
		return 0;

	if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
		dev_err(event->pmu->dev,
			"Trying to read invalid counter %d\n", idx);
		return 0;
	}

	raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
	if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
		val = __dsu_pmu_read_pmccntr();
	else
		val = __dsu_pmu_read_counter(idx);
	raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);

	return val;
}

static void dsu_pmu_write_counter(struct perf_event *event, u64 val)
{
	unsigned long flags;
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
	int idx = event->hw.idx;

	if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
			 &dsu_pmu->associated_cpus)))
		return;

	if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
		dev_err(event->pmu->dev,
			"Trying to write invalid counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
	if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
		__dsu_pmu_write_pmccntr(val);
	else
		__dsu_pmu_write_counter(idx, val);
	raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}

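/*
 * Counter allocation policy: the CYCLES event always takes the
 * dedicated 64-bit cycle counter (bit 31 of used_mask), while every
 * other event claims the first free 32-bit event counter.
 */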
static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events,
				 struct perf_event *event)
{
	int idx;
	unsigned long evtype = event->attr.config;
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
	unsigned long *used_mask = hw_events->used_mask;

	if (evtype == DSU_PMU_EVT_CYCLES) {
		if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask))
			return -EAGAIN;
		return DSU_PMU_IDX_CYCLE_COUNTER;
	}

	idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
	if (idx >= dsu_pmu->num_counters)
		return -EAGAIN;
	set_bit(idx, hw_events->used_mask);
	return idx;
}

static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
{
	__dsu_pmu_counter_interrupt_enable(idx);
	__dsu_pmu_enable_counter(idx);
}

static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
{
	__dsu_pmu_disable_counter(idx);
	__dsu_pmu_counter_interrupt_disable(idx);
}

static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
				     struct perf_event *event)
{
	int idx = event->hw.idx;
	unsigned long flags;

	if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
		dev_err(event->pmu->dev,
			"Trying to set invalid counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
	__dsu_pmu_set_event(idx, event->hw.config_base);
	raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}

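/*
 * Worked example of the wraparound arithmetic below, for a 32-bit
 * counter: if prev_count = 0xfffffffe and new_count = 0x00000001,
 * then (new_count - prev_count) & DSU_PMU_COUNTER_MASK(idx) =
 * 0xffffffff00000003 & 0xffffffff = 3, i.e. three events elapsed
 * across the overflow.
 */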
static void dsu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_count, new_count;

	do {
		/* We may also be called from the irq handler */
		prev_count = local64_read(&hwc->prev_count);
		new_count = dsu_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
		 prev_count);
	delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
	local64_add(delta, &event->count);
}

static void dsu_pmu_read(struct perf_event *event)
{
	dsu_pmu_event_update(event);
}

static inline u32 dsu_pmu_get_reset_overflow(void)
{
	return __dsu_pmu_get_reset_overflow();
}

/**
 * dsu_pmu_set_event_period: Set the period for the counter.
 *
 * All DSU PMU event counters, except the cycle counter, are 32-bit
 * counters. To handle cases of extreme interrupt latency, we program
 * the counter with half of the max count, i.e. a 32-bit counter is
 * reprogrammed to 0x7fffffff, giving roughly 2^31 events of headroom
 * before the next overflow.
 */
static void dsu_pmu_set_event_period(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1;

	local64_set(&event->hw.prev_count, val);
	dsu_pmu_write_counter(event, val);
}

static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev)
{
	int i;
	bool handled = false;
	struct dsu_pmu *dsu_pmu = dev;
	struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
	unsigned long overflow;

	overflow = dsu_pmu_get_reset_overflow();
	if (!overflow)
		return IRQ_NONE;

	for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) {
		struct perf_event *event = hw_events->events[i];

		if (!event)
			continue;
		dsu_pmu_event_update(event);
		dsu_pmu_set_event_period(event);
		handled = true;
	}

	return IRQ_RETVAL(handled);
}

static void dsu_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);

	/* We always reprogram the counter */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
	dsu_pmu_set_event_period(event);
	if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER)
		dsu_pmu_set_event(dsu_pmu, event);
	event->hw.state = 0;
	dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
}

static void dsu_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);

	if (event->hw.state & PERF_HES_STOPPED)
		return;
	dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
	dsu_pmu_event_update(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int dsu_pmu_add(struct perf_event *event, int flags)
{
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
	struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
					   &dsu_pmu->associated_cpus)))
		return -ENOENT;

	idx = dsu_pmu_get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hw_events->events[idx] = event;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (flags & PERF_EF_START)
		dsu_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	return 0;
}

static void dsu_pmu_del(struct perf_event *event, int flags)
{
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
	struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	dsu_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	perf_event_update_userpage(event);
}

static void dsu_pmu_enable(struct pmu *pmu)
{
	u32 pmcr;
	unsigned long flags;
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);

	/* If no counters are added, skip enabling the PMU */
	if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
		return;

	raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
	pmcr = __dsu_pmu_read_pmcr();
	pmcr |= CLUSTERPMCR_E;
	__dsu_pmu_write_pmcr(pmcr);
	raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}

static void dsu_pmu_disable(struct pmu *pmu)
{
	u32 pmcr;
	unsigned long flags;
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);

	raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
	pmcr = __dsu_pmu_read_pmcr();
	pmcr &= ~CLUSTERPMCR_E;
	__dsu_pmu_write_pmcr(pmcr);
	raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}

static bool dsu_pmu_validate_event(struct pmu *pmu,
				   struct dsu_hw_events *hw_events,
				   struct perf_event *event)
{
	if (is_software_event(event))
		return true;
	/* Reject groups spanning multiple HW PMUs. */
	if (event->pmu != pmu)
		return false;
	return dsu_pmu_get_event_idx(hw_events, event) >= 0;
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU, by simulating the allocation on a scratch
 * counter bitmap.
 */
static bool dsu_pmu_validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct dsu_hw_events fake_hw;

	if (event->group_leader == event)
		return true;

	memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
	if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
		return false;
	for_each_sibling_event(sibling, leader) {
		if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
			return false;
	}
	return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
}

static int dsu_pmu_event_init(struct perf_event *event)
{
	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* We don't support sampling */
	if (is_sampling_event(event)) {
		dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
		return -EOPNOTSUPP;
	}

	/* We cannot support task-bound events */
	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
		dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
		return -EINVAL;
	}

	if (has_branch_stack(event)) {
		dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
		return -EINVAL;
	}

	if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
		dev_dbg(dsu_pmu->pmu.dev,
			"Requested cpu is not associated with the DSU\n");
		return -EINVAL;
	}
	/*
	 * Choose the current active CPU to read the events. We don't want
	 * to migrate the event contexts, irq handling etc to the requested
	 * CPU. As long as the requested CPU is within the same DSU, we
	 * are fine.
	 */
	event->cpu = cpumask_first(&dsu_pmu->active_cpu);
	if (event->cpu >= nr_cpu_ids)
		return -EINVAL;
	if (!dsu_pmu_validate_group(event))
		return -EINVAL;

	event->hw.config_base = event->attr.config;
	return 0;
}

static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
{
	struct dsu_pmu *dsu_pmu;

	dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
	if (!dsu_pmu)
		return ERR_PTR(-ENOMEM);

	raw_spin_lock_init(&dsu_pmu->pmu_lock);
	/*
	 * Initialise the number of counters to -1, until we probe
	 * the real number on a connected CPU.
	 */
	dsu_pmu->num_counters = -1;
	return dsu_pmu;
}

/**
 * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster
 * from the device tree.
 */
static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
{
	int i = 0, n, cpu;
	struct device_node *cpu_node;

	n = of_count_phandle_with_args(dev->of_node, "cpus", NULL);
	if (n <= 0)
		return -ENODEV;
	for (; i < n; i++) {
		cpu_node = of_parse_phandle(dev->of_node, "cpus", i);
		if (!cpu_node)
			break;
		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		/*
		 * We have to ignore the failures here and continue scanning
		 * the list to handle cases where the nr_cpus could be capped
		 * in the running kernel.
		 */
		if (cpu < 0)
			continue;
		cpumask_set_cpu(cpu, mask);
	}
	return 0;
}

/**
 * dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster
 * from ACPI.
 */
static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask)
{
#ifdef CONFIG_ACPI
	struct acpi_device *parent_adev = acpi_dev_parent(ACPI_COMPANION(dev));
	int cpu;

	/*
	 * A DSU PMU node sits inside a cluster parent node along with the
	 * CPU nodes. We need to find all CPUs that share the same parent
	 * as this PMU.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_device *acpi_dev;
		struct device *cpu_dev = get_cpu_device(cpu);

		if (!cpu_dev)
			continue;

		acpi_dev = ACPI_COMPANION(cpu_dev);
		if (acpi_dev && acpi_dev_parent(acpi_dev) == parent_adev)
			cpumask_set_cpu(cpu, mask);
	}
#endif

	return 0;
}

/*
 * dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
 */
static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
{
	u64 num_counters;
	u32 cpmceid[2];

	num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) &
						CLUSTERPMCR_N_MASK;
	/* We can only support up to 31 independent counters */
	if (WARN_ON(num_counters > 31))
		num_counters = 31;
	dsu_pmu->num_counters = num_counters;
	if (!dsu_pmu->num_counters)
		return;
	cpmceid[0] = __dsu_pmu_read_pmceid(0);
	cpmceid[1] = __dsu_pmu_read_pmceid(1);
	bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid,
			  DSU_PMU_MAX_COMMON_EVENTS);
}

static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
{
	cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
	if (irq_set_affinity(dsu_pmu->irq, &dsu_pmu->active_cpu))
		pr_warn("Failed to set irq affinity to %d\n", cpu);
}

/*
 * dsu_pmu_init_pmu: Initialise the DSU PMU configurations if
 * we haven't done it already.
 */
static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
{
	if (dsu_pmu->num_counters == -1)
		dsu_pmu_probe_pmu(dsu_pmu);
	/* Reset the interrupt overflow mask */
	dsu_pmu_get_reset_overflow();
}

static int dsu_pmu_device_probe(struct platform_device *pdev)
{
	int irq, rc;
	struct dsu_pmu *dsu_pmu;
	struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
	char *name;
	static atomic_t pmu_idx = ATOMIC_INIT(-1);

	dsu_pmu = dsu_pmu_alloc(pdev);
	if (IS_ERR(dsu_pmu))
		return PTR_ERR(dsu_pmu);

	if (is_of_node(fwnode))
		rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
	else if (is_acpi_device_node(fwnode))
		rc = dsu_pmu_acpi_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
	else
		return -ENOENT;

	if (rc) {
		dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
		return rc;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
				PMUNAME, atomic_inc_return(&pmu_idx));
	if (!name)
		return -ENOMEM;
	rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq,
			      IRQF_NOBALANCING, name, dsu_pmu);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq);
		return rc;
	}

	dsu_pmu->irq = irq;
	platform_set_drvdata(pdev, dsu_pmu);
	rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state,
						&dsu_pmu->cpuhp_node);
	if (rc)
		return rc;

	dsu_pmu->pmu = (struct pmu) {
		.task_ctx_nr	= perf_invalid_context,
		.module		= THIS_MODULE,
		.pmu_enable	= dsu_pmu_enable,
		.pmu_disable	= dsu_pmu_disable,
		.event_init	= dsu_pmu_event_init,
		.add		= dsu_pmu_add,
		.del		= dsu_pmu_del,
		.start		= dsu_pmu_start,
		.stop		= dsu_pmu_stop,
		.read		= dsu_pmu_read,

		.attr_groups	= dsu_pmu_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
	if (rc) {
		cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
						&dsu_pmu->cpuhp_node);
	}

	return rc;
}

static int dsu_pmu_device_remove(struct platform_device *pdev)
{
	struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&dsu_pmu->pmu);
	cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);

	return 0;
}

static const struct of_device_id dsu_pmu_of_match[] = {
	{ .compatible = "arm,dsu-pmu", },
	{},
};
MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id dsu_pmu_acpi_match[] = {
	{ "ARMHD500", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, dsu_pmu_acpi_match);
#endif

static struct platform_driver dsu_pmu_driver = {
	.driver = {
		.name	= DRVNAME,
		.of_match_table = of_match_ptr(dsu_pmu_of_match),
		.acpi_match_table = ACPI_PTR(dsu_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = dsu_pmu_device_probe,
	.remove = dsu_pmu_device_remove,
};

static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
						   cpuhp_node);

	if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
		return 0;

	/* If the PMU is already managed, there is nothing to do */
	if (!cpumask_empty(&dsu_pmu->active_cpu))
		return 0;

	dsu_pmu_init_pmu(dsu_pmu);
	dsu_pmu_set_active_cpu(cpu, dsu_pmu);

	return 0;
}

static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	int dst;
	struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
						   cpuhp_node);

	if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
		return 0;

	dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
	/* If there are no active CPUs in the DSU, leave IRQ disabled */
	if (dst >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
	dsu_pmu_set_active_cpu(dst, dsu_pmu);

	return 0;
}

static int __init dsu_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					DRVNAME,
					dsu_pmu_cpu_online,
					dsu_pmu_cpu_teardown);
	if (ret < 0)
		return ret;
	dsu_pmu_cpuhp_state = ret;
	ret = platform_driver_register(&dsu_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);

	return ret;
}

static void __exit dsu_pmu_exit(void)
{
	platform_driver_unregister(&dsu_pmu_driver);
	cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
}

module_init(dsu_pmu_init);
module_exit(dsu_pmu_exit);

MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
MODULE_LICENSE("GPL v2");
880 | |