// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code, and the callchain
 * support is based on the code of MIPS stacktrace.c.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event *events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters; some
	 * cores use this field differently, and some may not use it at all.
	 */
	unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
#define CNTR_EVEN	0x55555555
#define CNTR_ODD	0xaaaaaaaa
#define CNTR_ALL	0xffffffff
	enum {
		T = 0,
		V = 1,
		P = 2,
	} range;
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64 max_period;
	u64 valid_count;
	u64 overflow;
	const char *name;
	int irq;
	u64 (*read_counter)(unsigned int idx);
	void (*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int num_counters;
};

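/* Width in bits of the hardware counters (typically 32 or 64), set at probe time. */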
static int counter_bits;
static struct mips_pmu mipspmu;

#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
					 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)		((vpe) << MIPS_PERFCTRL_VPEID_S)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		(filter)
#endif /* CONFIG_CPU_BMIPS5000 */

#define M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |	\
					 MIPS_PERFCTRL_K |	\
					 MIPS_PERFCTRL_U |	\
					 MIPS_PERFCTRL_S |	\
					 MIPS_PERFCTRL_IE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif

#define CNTR_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : cpu_vpe_id(&current_cpu_data))
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

/* 0: Not Loongson-3
 * 1: Loongson-3A1000/3B1000/3B1500
 * 2: Loongson-3A2000/3A3000
 * 3: Loongson-3A4000+
 */

#define LOONGSON_PMU_TYPE0 0
#define LOONGSON_PMU_TYPE1 1
#define LOONGSON_PMU_TYPE2 2
#define LOONGSON_PMU_TYPE3 3

static inline int get_loongson3_pmu_type(void)
{
	if (boot_cpu_type() != CPU_LOONGSON64)
		return LOONGSON_PMU_TYPE0;
	if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
		return LOONGSON_PMU_TYPE1;
	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
		return LOONGSON_PMU_TYPE2;
	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
		return LOONGSON_PMU_TYPE3;

	return LOONGSON_PMU_TYPE0;
}

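/*
 * When performance counters are shared between the two VPEs of a core,
 * VPE 1 uses the second pair of hardware counters, so remap its logical
 * counter indices onto counters 2 and 3.
 */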
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	u64 mask = CNTR_BIT_MASK(counter_bits);
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64() & mask;
	case 1:
		return read_c0_perfcntr1_64() & mask;
	case 2:
		return read_c0_perfcntr2_64() & mask;
	case 3:
		return read_c0_perfcntr3_64() & mask;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	val &= CNTR_BIT_MASK(counter_bits);
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;
	unsigned long cntr_mask;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
		cntr_mask = (hwc->event_base >> 10) & 0xffff;
	else
		cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind occupies the
		 * counter that an event of the latter kind wants to use,
		 * the "counter allocation" for the latter event fails.
		 * If the two could be swapped dynamically, both would be
		 * satisfied, but we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct perf_event *event = container_of(evt, struct perf_event, hw);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned int range = evt->event_base >> 24;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
			/* Make sure the interrupt is enabled. */
			MIPS_PERFCTRL_IE;
	else
		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
			/* Make sure the interrupt is enabled. */
			MIPS_PERFCTRL_IE;

	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
		/* The counter is processor wide. Set it up to count all TCs. */
		pr_debug("Enabling perf counter for all TCs\n");
		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
	} else {
		unsigned int cpu, ctrl;

		/*
		 * Set up the counter for a particular CPU when event->cpu is
		 * a valid CPU number. Otherwise set up the counter for the CPU
		 * scheduling this thread.
		 */
		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();

		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
		ctrl |= M_TC_EN_VPE;
		cpuc->saved_ctrl[idx] |= ctrl;
		pr_debug("Enabling perf counter for CPU%d\n", cpu);
	}
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

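/*
 * MIPS counters count upwards and raise their interrupt once the top
 * (overflow) bit becomes set, so the counter is programmed to
 * "mipspmu.overflow - left": after "left" more events it overflows.
 * For example, with 32-bit counters (overflow == 1ULL << 31) and
 * left == 1000, the counter starts at 0x80000000 - 1000.
 */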
static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
		mipsxx_pmu_write_control(idx,
			M_PERFCTL_EVENT(hwc->event_base & 0x3ff));

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers cannot
 * be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * cannot make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If a counter interrupt is raised while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING |
				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				  IRQF_SHARED,
				  "mips_perf_pmu", &mipspmu);
		if (err) {
			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warn("The platform hasn't properly defined its interrupt controller\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, &mipspmu);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	if (num_possible_cpus() > 1)
		return ((unsigned int)pev->range << 24) |
			(pev->cntr_mask & 0xffff00) |
			(pev->event_id & 0xff);
	else
#endif /* CONFIG_MIPS_MT_SMP */
	{
		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
			return (pev->cntr_mask & 0xfffc00) |
				(pev->event_id & 0x3ff);
		else
			return (pev->cntr_mask & 0xffff00) |
				(pev->event_id & 0xff);
	}
}

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{

	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
		[cache_type]
		[cache_op]
		[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;

}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}


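/*
 * Probe the number of counters: the M bit in each control register
 * indicates that another counter/control pair follows it.
 */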
static int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

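/*
 * Loongson-3A2000/3A3000 appear to multiplex several banks of events
 * through the event field of the control register; each pair of writes
 * below selects a further bank (event numbers 127, 191, 255, 319, 383,
 * 575) and zeroes its counter so that no stale state survives the reset.
 */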
static void loongson3_reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		mipsxx_pmu_write_control(3, 127<<5);
		mipspmu.write_counter(3, 0);
		mipsxx_pmu_write_control(3, 191<<5);
		mipspmu.write_counter(3, 0);
		mipsxx_pmu_write_control(3, 255<<5);
		mipspmu.write_counter(3, 0);
		mipsxx_pmu_write_control(3, 319<<5);
		mipspmu.write_counter(3, 0);
		mipsxx_pmu_write_control(3, 383<<5);
		mipspmu.write_counter(3, 0);
		mipsxx_pmu_write_control(3, 575<<5);
		mipspmu.write_counter(3, 0);
		fallthrough;
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		mipsxx_pmu_write_control(2, 127<<5);
		mipspmu.write_counter(2, 0);
		mipsxx_pmu_write_control(2, 191<<5);
		mipspmu.write_counter(2, 0);
		mipsxx_pmu_write_control(2, 255<<5);
		mipspmu.write_counter(2, 0);
		mipsxx_pmu_write_control(2, 319<<5);
		mipspmu.write_counter(2, 0);
		mipsxx_pmu_write_control(2, 383<<5);
		mipspmu.write_counter(2, 0);
		mipsxx_pmu_write_control(2, 575<<5);
		mipspmu.write_counter(2, 0);
		fallthrough;
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		mipsxx_pmu_write_control(1, 127<<5);
		mipspmu.write_counter(1, 0);
		mipsxx_pmu_write_control(1, 191<<5);
		mipspmu.write_counter(1, 0);
		mipsxx_pmu_write_control(1, 255<<5);
		mipspmu.write_counter(1, 0);
		mipsxx_pmu_write_control(1, 319<<5);
		mipspmu.write_counter(1, 0);
		mipsxx_pmu_write_control(1, 383<<5);
		mipspmu.write_counter(1, 0);
		mipsxx_pmu_write_control(1, 575<<5);
		mipspmu.write_counter(1, 0);
		fallthrough;
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
		mipsxx_pmu_write_control(0, 127<<5);
		mipspmu.write_counter(0, 0);
		mipsxx_pmu_write_control(0, 191<<5);
		mipspmu.write_counter(0, 0);
		mipsxx_pmu_write_control(0, 255<<5);
		mipspmu.write_counter(0, 0);
		mipsxx_pmu_write_control(0, 319<<5);
		mipspmu.write_counter(0, 0);
		mipsxx_pmu_write_control(0, 383<<5);
		mipspmu.write_counter(0, 0);
		mipsxx_pmu_write_control(0, 575<<5);
		mipspmu.write_counter(0, 0);
		break;
	}
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
		loongson3_reset_counters(arg);
		return;
	}

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		fallthrough;
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		fallthrough;
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		fallthrough;
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
		break;
	}
}

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
	/* These only count dcache, not icache */
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD },
};

static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
};

static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
};

static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
	},
},
/*
 * 74K core does not have specific DTLB events. proAptiv core has
 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
 * not included here. One can use raw events if really needed.
 */
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event i6x00_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD },
	},
},
[C(DTLB)] = {
	/* Can't distinguish read & write */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Conditional branches / mispredicted */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD },
	},
},
};

static const struct mips_perf_event loongson3_cache_map1
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
		[C(RESULT_MISS)] = { 0x01, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
		[C(RESULT_MISS)] = { 0x01, CNTR_ODD },
	},
},
};

static const struct mips_perf_event loongson3_cache_map2
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x156, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x155, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x153, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x18, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x18, CNTR_ALL },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x1b6, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x1b7, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0x1bf, CNTR_ALL },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x92, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x92, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x94, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x9c, CNTR_ALL },
	},
},
};

static const struct mips_perf_event loongson3_cache_map3
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x1e, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x1f, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0xaa, CNTR_ALL },
		[C(RESULT_MISS)] = { 0xa9, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x1c, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x1d, CNTR_ALL },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x2e, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x2f, CNTR_ALL },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x14, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x1b, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x08, CNTR_ALL },
	},
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)] = { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)] = { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)] = { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted, so use the same event
	 * for read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)] = { 0x37, CNTR_ALL },
	},
},
};

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = MIPS_PERFCTRL_IE;

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= MIPS_PERFCTRL_U;
	if (!attr->exclude_kernel) {
		hwc->config_base |= MIPS_PERFCTRL_K;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= MIPS_PERFCTRL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= MIPS_PERFCTRL_S;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}

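/*
 * pause_local_counters() saves each counter's control register and writes
 * it back with the counting-mode bits cleared; resume_local_counters()
 * restores the saved values.
 */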
static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int n, handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	for (n = counters - 1; n >= 0; n--) {
		if (!test_bit(n, cpuc->used_mask))
			continue;

		counter = mipspmu.read_counter(n);
		if (!(counter & mipspmu.overflow))
			continue;

		handle_associated_event(cpuc, n, &data, regs);
		handled = IRQ_HANDLED;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1)
/* P5600 */
#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* interAptiv */
#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * For most cores the user can use 0-255 raw events, where 0-127 are for
 * the events of even counters and 128-255 for odd counters. Note that
 * bit 7 is used as the even/odd bank selector. So, for example, when the
 * user wants Event Num 15 on an odd counter (per the core's user
 * manual), 128 must be added to 15, i.e. the event config to use is
 * 143 (0x8f).
 *
 * Some newer cores have even more events, in which case the user can
 * use raw events 0-511, where 0-255 are for the events of even counters
 * and 256-511 for odd counters, so bit 8 is the even/odd bank selector.
 */
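/*
 * Illustrative usage only (the perf invocations below are an assumption
 * about the userspace tool, not something defined by this file): the
 * raw config reaches us via perf_event_attr::config, e.g.
 *
 *	perf stat -e r0f ...	# event 15, even-counter bank
 *	perf stat -e r8f ...	# event 15, odd-counter bank (15 + 128)
 */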
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	/* currently most cores have 7-bit event numbers */
	int pmu_type;
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs never examine the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
	case CPU_1074K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_PROAPTIV:
		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_P5600:
	case CPU_P6600:
		/* 8-bit event numbers */
		raw_id = config & 0x1ff;
		base_id = raw_id & 0xff;
		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_I6400:
	case CPU_I6500:
		/* 8-bit event numbers */
		base_id = config & 0xff;
		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_INTERAPTIV:
		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	case CPU_LOONGSON64:
		pmu_type = get_loongson3_pmu_type();

		switch (pmu_type) {
		case LOONGSON_PMU_TYPE1:
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
			break;
		case LOONGSON_PMU_TYPE2:
			base_id = config & 0x3ff;
			raw_event.cntr_mask = CNTR_ALL;

			if ((base_id >= 1 && base_id < 28) ||
				(base_id >= 64 && base_id < 90) ||
				(base_id >= 128 && base_id < 164) ||
				(base_id >= 192 && base_id < 200) ||
				(base_id >= 256 && base_id < 275) ||
				(base_id >= 320 && base_id < 361) ||
				(base_id >= 384 && base_id < 574))
				break;

			return ERR_PTR(-EOPNOTSUPP);
		case LOONGSON_PMU_TYPE3:
			base_id = raw_id;
			raw_event.cntr_mask = CNTR_ALL;
			break;
		}
		break;
	}

	raw_event.event_id = base_id;

	return &raw_event;
}
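
/*
 * Worked example of the decode above (derived from the code, for
 * illustration only): on a P5600, config 0x10f yields raw_id = 0x10f,
 * which is greater than 255, so the event is constrained to the odd
 * counters, and base_id = 0x10f & 0xff = 0x0f is the event number
 * programmed into the counter control register.
 */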

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int base_id = config & 0x7f;
	unsigned int event_max;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
		event_max = 0x5f;
	else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
		event_max = 0x42;
	else
		event_max = 0x3a;

	if (base_id > event_max)
		return ERR_PTR(-EOPNOTSUPP);

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3e ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}
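
/*
 * Reading the checks above, for example: on an Octeon II (event_max
 * 0x42), config 0x43 is out of range and config 0x1e hits the
 * reserved-ID list, so both are rejected with -EOPNOTSUPP, while
 * config 0x01 is accepted and may be scheduled on any counter
 * (CNTR_ALL).
 */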

static int __init
init_hw_perf_events(void)
{
	int counters, irq, pmu_type;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

	if (get_c0_perfcount_int)
		irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;
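
	/*
	 * irq == -1 here means no dedicated performance counter interrupt
	 * line was found; counter overflow is then detected from the timer
	 * interrupt, which is what the "(share with timer interrupt)" note
	 * in the banner printed below refers to.
	 */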

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_PROAPTIV:
		mipspmu.name = "mips/proAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P5600:
		mipspmu.name = "mips/P5600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P6600:
		mipspmu.name = "mips/P6600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_I6400:
		mipspmu.name = "mips/I6400";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_I6500:
		mipspmu.name = "mips/I6500";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_1074K:
		mipspmu.name = "mips/1074K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_INTERAPTIV:
		mipspmu.name = "mips/interAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON32:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON64:
		mipspmu.name = "mips/loongson3";
		pmu_type = get_loongson3_pmu_type();

		switch (pmu_type) {
		case LOONGSON_PMU_TYPE1:
			counters = 2;
			mipspmu.general_event_map = &loongson3_event_map1;
			mipspmu.cache_event_map = &loongson3_cache_map1;
			break;
		case LOONGSON_PMU_TYPE2:
			counters = 4;
			mipspmu.general_event_map = &loongson3_event_map2;
			mipspmu.cache_event_map = &loongson3_cache_map2;
			break;
		case LOONGSON_PMU_TYPE3:
			counters = 4;
			mipspmu.general_event_map = &loongson3_event_map3;
			mipspmu.cache_event_map = &loongson3_cache_map3;
			break;
		}
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or they are not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
			counter_bits = 48;
			mipspmu.max_period = (1ULL << 47) - 1;
			mipspmu.valid_count = (1ULL << 47) - 1;
			mipspmu.overflow = 1ULL << 47;
		} else {
			counter_bits = 64;
			mipspmu.max_period = (1ULL << 63) - 1;
			mipspmu.valid_count = (1ULL << 63) - 1;
			mipspmu.overflow = 1ULL << 63;
		}
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
	} else {
		counter_bits = 32;
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
	}
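
	/*
	 * In every branch above, for N-bit counters the top bit
	 * (1ULL << (N - 1)) serves as the overflow indicator, so the
	 * largest programmable period is 2^(N-1) - 1; e.g. in the 32-bit
	 * case max_period is 0x7fffffff and overflow is 0x80000000.
	 */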

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");
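
	/*
	 * A typical boot log line produced by the pr_info/pr_cont pair
	 * above (the values shown are illustrative):
	 *
	 *	Performance counters: mips/74K PMU enabled, 4 32-bit
	 *	counters available to each CPU, irq 23
	 */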

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);