// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/hwrpb.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>


/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1
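/* (The EV67-class PMU described below only ever uses two of the three.) */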

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int enabled;
	/* Number of events scheduled; also the number of entries valid in the arrays below. */
	int n_events;
	/* Number of events added since the last hw_perf_disable(). */
	int n_added;
	/* Events currently scheduled. */
	struct perf_event *event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);



/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int max_events;
	/* The number of PMCs on this Alpha */
	int num_pmcs;
	/*
	 * All PMC counters reside in the IBOX register PCTR.  This is the
	 * LSB of the counter.
	 */
	int pmc_count_shift[MAX_HWEVENTS];
	/*
	 * The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/*
	 * The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	long pmc_left[3];
	/* Subroutine for allocation of PMCs.  Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
	/* Subroutine for checking validity of a raw event for this PMU. */
	int (*raw_event_valid)(u64 config);
};

/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;


#define HW_OP_UNSUPPORTED -1

/*
 * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow.  Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs, hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)


/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in the same order as
 * the enum ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},		/* EV67_CYCLES */
	{EV67_PCTR_INSTR_CYCLES, 0},		/* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1},	/* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}		/* EV67_MBOXREPLAY */
};
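
/*
 * E.g. a lone cycles event is counted on PMC1 with the EV67_PCTR_INSTR_CYCLES
 * config, while a lone instructions event sits on PMC0 with the same config.
 */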


/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
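/*
 * In practice the only two-event combinations accepted below are
 * {cycles, instructions}, {cycles, mbox replay} and
 * {instructions, bcache misses}; anything else is a conflict.
 */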
static int ev67_check_constraints(struct perf_event **event,
				  unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1 */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1 */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0 */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict. */
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}


static int ev67_raw_event_valid(u64 config)
{
	return config >= EV67_CYCLES && config < EV67_LAST_ET;
}
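
/*
 * Raw events therefore take the enum ev67_pmc_event_type codes directly:
 * config 1 = cycles, 2 = instructions, 3 = Bcache misses, 4 = MBOX replay
 * traps (e.g. "perf stat -e r2 ..." assuming the usual perf tool raw-event
 * syntax).
 */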


static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints,
	.raw_event_valid = ev67_raw_event_valid,
};
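
/*
 * Note the pmc_left values: PCTR0 must be loaded with at least 16 counts
 * remaining before overflow and PCTR1 with at least 4; this is enforced in
 * alpha_perf_event_set_period() below.
 */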


/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
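	/* The low bits presumably tell the PALcode which PMC(s) to update. */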
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	long left = local64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];

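	/*
	 * The PMC counts upwards and interrupts on overflow, so load it with
	 * -left (truncated to the counter width by alpha_write_pmc()) so
	 * that it overflows after another 'left' events.
	 */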
	local64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}


/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for a negative delta hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

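	/*
	 * prev_count may still hold the raw -left value written by
	 * alpha_perf_event_set_period(), so mask it down to the counter
	 * width before taking the difference.
	 */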
	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come.  Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}


/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
	for_each_sibling_event(pe, group) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}



/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{

	/* No HW events is possible from hw_perf_group_sched_in(). */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}


/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
			alpha_perf_event_set_period(pe, hwc, idx);
			cpuc->current_idx[j] = idx;
		}

		if (!(hwc->state & PERF_HES_STOPPED))
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
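	/* check_constraints() gave every scheduled event the same config_base. */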
	cpuc->config = cpuc->event[0]->hw.config_base;
}



/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int n0;
	int ret;
	unsigned long irq_flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions.  The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	/* Default to error to be returned */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}

	hwc->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_STOPPED;

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);

	return ret;
}



/* Remove a perf HW event from the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long irq_flags;
	int j;

	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);
}


static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}


static void alpha_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		cpuc->idx_mask &= ~(1UL<<hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		alpha_perf_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_UPTODATE;
	}

	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}


static void alpha_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		alpha_perf_event_set_period(event, hwc, hwc->idx);
	}

	hwc->state = 0;

	cpuc->idx_mask |= 1UL<<hwc->idx;
	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}


/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *   EV67 but we don't do sufficiently deep CPU detection to detect them.
 *   Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}



static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
	return;
}



static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types, with
	 * anything else only programmable via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
		if (!alpha_pmu->raw_event_valid(attr->config))
			return -EINVAL;
		ev = attr->config;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then we will know what HW events are actually
	 * scheduled on to the PMU.  At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx.  We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   alpha_pmu->num_pmcs - 1,
				   evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we just assume
	 * that we can just use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

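	/*
	 * No sampling period requested means counting mode; default to the
	 * longest period the PMC supports so that overflow interrupts are
	 * as infrequent as possible.
	 */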
	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return -ENODEV;

	/* Do the real initialisation work. */
	return __hw_perf_event_init(event);
}

/*
 * Main entry point - enable HW performance counters.
 */
static void alpha_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any new scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}


/*
 * Main entry point - disable HW performance counters.
 */

static void alpha_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable = alpha_pmu_enable,
	.pmu_disable = alpha_pmu_disable,
	.event_init = alpha_pmu_event_init,
	.add = alpha_pmu_add,
	.del = alpha_pmu_del,
	.start = alpha_pmu_start,
	.stop = alpha_pmu_stop,
	.read = alpha_pmu_read,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};


/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
	unsigned long flags;
	unsigned long pcr;
	int pcr0, pcr1;
	int cpu;

	if (!supported_cpu())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = wrperfmon(PERFMON_CMD_READ, 0);
	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

	local_irq_restore(flags);
}


/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows.  The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
					struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc;
	struct perf_sample_data data;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, j;

	__this_cpu_inc(irq_pmi_count);
	cpuc = this_cpu_ptr(&cpu_hw_events);

	/* Completely counting through the PMC's period to trigger a new PMC
	 * overflow interrupt while in this interrupt routine is utterly
	 * disastrous!  The EV6 and EV67 counters are sufficiently large to
	 * prevent this but to be really sure disable the PMCs.
	 */
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warn("PMI: silly index %ld\n", la_ptr);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	idx = la_ptr;

	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
	}

	if (unlikely(j == cpuc->n_events)) {
		/* This can occur if the event is disabled right on a PMC overflow. */
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	event = cpuc->event[j];

	if (unlikely(!event)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warn("PMI: No event at index %d!\n", idx);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	hwc = &event->hw;
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
	perf_sample_data_init(&data, 0, hwc->last_period);

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, &data, regs)) {
			/* Interrupts coming too quickly; "throttle" the
			 * counter, i.e., disable it for a little while.
			 */
			alpha_pmu_stop(event, 0);
		}
	}
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);

	return;
}



/*
 * Init call to initialise performance events at kernel startup.
 */
int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_cpu()) {
		pr_cont("No support for your CPU.\n");
		return 0;
	}

	pr_cont("Supported CPU type!\n");

	/* Override performance counter IRQ vector */

	perf_irq = alpha_perf_event_irq_handler;

	/* And set up PMU specification */
	alpha_pmu = &ev67_pmu;

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);