1 | /* |
2 | * Performance events x86 architecture header |
3 | * |
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> |
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar |
6 | * Copyright (C) 2009 Jaswinder Singh Rajput |
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter |
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra |
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> |
10 | * Copyright (C) 2009 Google, Inc., Stephane Eranian |
11 | * |
12 | * For licencing details see kernel-base/COPYING |
13 | */ |
14 | |
15 | #include <linux/perf_event.h> |
16 | |
17 | #include <asm/fpu/xstate.h> |
18 | #include <asm/intel_ds.h> |
19 | #include <asm/cpu.h> |
20 | |
21 | /* To enable MSR tracing please use the generic trace points. */ |
22 | |
23 | /* |
24 | * | NHM/WSM | SNB | |
25 | * register ------------------------------- |
26 | * | HT | no HT | HT | no HT | |
27 | *----------------------------------------- |
28 | * offcore | core | core | cpu | core | |
29 | * lbr_sel | core | core | cpu | core | |
30 | * ld_lat | cpu | core | cpu | core | |
31 | *----------------------------------------- |
32 | * |
33 | * Given that there is a small number of shared regs, |
34 | * we can pre-allocate their slot in the per-cpu |
35 | * per-core reg tables. |
36 | */ |
enum extra_reg_type {
	EXTRA_REG_NONE		= -1, /* not used */

	EXTRA_REG_RSP_0		= 0,  /* offcore_response_0 */
	EXTRA_REG_RSP_1		= 1,  /* offcore_response_1 */
	EXTRA_REG_LBR		= 2,  /* lbr_select */
	EXTRA_REG_LDLAT		= 3,  /* ld_lat_threshold */
	EXTRA_REG_FE		= 4,  /* fe_* */
	EXTRA_REG_SNOOP_0	= 5,  /* snoop response 0 */
	EXTRA_REG_SNOOP_1	= 6,  /* snoop response 1 */

	EXTRA_REG_MAX		      /* number of entries needed */
49 | }; |
50 | |
51 | struct event_constraint { |
52 | union { |
53 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
54 | u64 idxmsk64; |
55 | }; |
56 | u64 code; |
57 | u64 cmask; |
58 | int weight; |
59 | int overlap; |
60 | int flags; |
61 | unsigned int size; |
62 | }; |
63 | |
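/*
 * constraint_match() relies on unsigned wrap-around: the test below is
 * equivalent to
 *
 *	c->code <= (ecode & c->cmask) && (ecode & c->cmask) <= c->code + c->size
 *
 * so a plain EVENT_CONSTRAINT (size == 0) matches a single masked event
 * code, while an EVENT_CONSTRAINT_RANGE matches the whole range.
 */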
64 | static inline bool constraint_match(struct event_constraint *c, u64 ecode) |
65 | { |
66 | return ((ecode & c->cmask) - c->code) <= (u64)c->size; |
67 | } |
68 | |
69 | #define PERF_ARCH(name, val) \ |
70 | PERF_X86_EVENT_##name = val, |
71 | |
72 | /* |
73 | * struct hw_perf_event.flags flags |
74 | */ |
75 | enum { |
76 | #include "perf_event_flags.h" |
77 | }; |
78 | |
79 | #undef PERF_ARCH |
80 | |
81 | #define PERF_ARCH(name, val) \ |
82 | static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) == \ |
83 | PERF_X86_EVENT_##name); |
84 | |
85 | #include "perf_event_flags.h" |
86 | |
87 | #undef PERF_ARCH |
88 | |
89 | static inline bool is_topdown_count(struct perf_event *event) |
90 | { |
91 | return event->hw.flags & PERF_X86_EVENT_TOPDOWN; |
92 | } |
93 | |
94 | static inline bool is_metric_event(struct perf_event *event) |
95 | { |
96 | u64 config = event->attr.config; |
97 | |
98 | return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) && |
99 | ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) && |
100 | ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX); |
101 | } |
102 | |
103 | static inline bool is_slots_event(struct perf_event *event) |
104 | { |
105 | return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS; |
106 | } |
107 | |
108 | static inline bool is_topdown_event(struct perf_event *event) |
109 | { |
110 | return is_metric_event(event) || is_slots_event(event); |
111 | } |
112 | |
113 | static inline bool is_branch_counters_group(struct perf_event *event) |
114 | { |
115 | return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS; |
116 | } |
117 | |
118 | struct amd_nb { |
119 | int nb_id; /* NorthBridge id */ |
120 | int refcnt; /* reference count */ |
121 | struct perf_event *owners[X86_PMC_IDX_MAX]; |
122 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; |
123 | }; |
124 | |
125 | #define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1) |
126 | #define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60) |
127 | #define PEBS_OUTPUT_OFFSET 61 |
128 | #define PEBS_OUTPUT_MASK (3ull << PEBS_OUTPUT_OFFSET) |
129 | #define PEBS_OUTPUT_PT (1ull << PEBS_OUTPUT_OFFSET) |
130 | #define PEBS_VIA_PT_MASK (PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD) |
131 | |
132 | /* |
 * Flags PEBS can handle without a PMI.
134 | * |
135 | * TID can only be handled by flushing at context switch. |
136 | * REGS_USER can be handled for events limited to ring 3. |
137 | * |
138 | */ |
139 | #define LARGE_PEBS_FLAGS \ |
140 | (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \ |
141 | PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \ |
142 | PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \ |
143 | PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \ |
144 | PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \ |
145 | PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE | \ |
146 | PERF_SAMPLE_WEIGHT_TYPE) |
147 | |
148 | #define PEBS_GP_REGS \ |
149 | ((1ULL << PERF_REG_X86_AX) | \ |
150 | (1ULL << PERF_REG_X86_BX) | \ |
151 | (1ULL << PERF_REG_X86_CX) | \ |
152 | (1ULL << PERF_REG_X86_DX) | \ |
153 | (1ULL << PERF_REG_X86_DI) | \ |
154 | (1ULL << PERF_REG_X86_SI) | \ |
155 | (1ULL << PERF_REG_X86_SP) | \ |
156 | (1ULL << PERF_REG_X86_BP) | \ |
157 | (1ULL << PERF_REG_X86_IP) | \ |
158 | (1ULL << PERF_REG_X86_FLAGS) | \ |
159 | (1ULL << PERF_REG_X86_R8) | \ |
160 | (1ULL << PERF_REG_X86_R9) | \ |
161 | (1ULL << PERF_REG_X86_R10) | \ |
162 | (1ULL << PERF_REG_X86_R11) | \ |
163 | (1ULL << PERF_REG_X86_R12) | \ |
164 | (1ULL << PERF_REG_X86_R13) | \ |
165 | (1ULL << PERF_REG_X86_R14) | \ |
166 | (1ULL << PERF_REG_X86_R15)) |
167 | |
168 | /* |
169 | * Per register state. |
170 | */ |
171 | struct er_account { |
172 | raw_spinlock_t lock; /* per-core: protect structure */ |
173 | u64 config; /* extra MSR config */ |
174 | u64 reg; /* extra MSR number */ |
175 | atomic_t ref; /* reference count */ |
176 | }; |
177 | |
178 | /* |
179 | * Per core/cpu state |
180 | * |
181 | * Used to coordinate shared registers between HT threads or |
182 | * among events on a single PMU. |
183 | */ |
184 | struct intel_shared_regs { |
185 | struct er_account regs[EXTRA_REG_MAX]; |
186 | int refcnt; /* per-core: #HT threads */ |
187 | unsigned core_id; /* per-core: core id */ |
188 | }; |
189 | |
190 | enum intel_excl_state_type { |
191 | INTEL_EXCL_UNUSED = 0, /* counter is unused */ |
192 | INTEL_EXCL_SHARED = 1, /* counter can be used by both threads */ |
193 | INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */ |
194 | }; |
195 | |
196 | struct intel_excl_states { |
197 | enum intel_excl_state_type state[X86_PMC_IDX_MAX]; |
198 | bool sched_started; /* true if scheduling has started */ |
199 | }; |
200 | |
201 | struct intel_excl_cntrs { |
202 | raw_spinlock_t lock; |
203 | |
204 | struct intel_excl_states states[2]; |
205 | |
206 | union { |
207 | u16 has_exclusive[2]; |
208 | u32 exclusive_present; |
209 | }; |
210 | |
211 | int refcnt; /* per-core: #HT threads */ |
212 | unsigned core_id; /* per-core: core id */ |
213 | }; |
214 | |
215 | struct x86_perf_task_context; |
216 | #define MAX_LBR_ENTRIES 32 |
217 | |
218 | enum { |
219 | LBR_FORMAT_32 = 0x00, |
220 | LBR_FORMAT_LIP = 0x01, |
221 | LBR_FORMAT_EIP = 0x02, |
222 | LBR_FORMAT_EIP_FLAGS = 0x03, |
223 | LBR_FORMAT_EIP_FLAGS2 = 0x04, |
224 | LBR_FORMAT_INFO = 0x05, |
225 | LBR_FORMAT_TIME = 0x06, |
226 | LBR_FORMAT_INFO2 = 0x07, |
227 | LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_INFO2, |
228 | }; |
229 | |
230 | enum { |
231 | X86_PERF_KFREE_SHARED = 0, |
232 | X86_PERF_KFREE_EXCL = 1, |
233 | X86_PERF_KFREE_MAX |
234 | }; |
235 | |
236 | struct cpu_hw_events { |
237 | /* |
238 | * Generic x86 PMC bits |
239 | */ |
240 | struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ |
241 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
242 | unsigned long dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
243 | int enabled; |
244 | |
245 | int n_events; /* the # of events in the below arrays */ |
246 | int n_added; /* the # last events in the below arrays; |
247 | they've never been enabled yet */ |
248 | int n_txn; /* the # last events in the below arrays; |
249 | added in the current transaction */ |
250 | int n_txn_pair; |
251 | int n_txn_metric; |
252 | int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ |
253 | u64 tags[X86_PMC_IDX_MAX]; |
254 | |
255 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ |
256 | struct event_constraint *event_constraint[X86_PMC_IDX_MAX]; |
257 | |
258 | int n_excl; /* the number of exclusive events */ |
259 | |
260 | unsigned int txn_flags; |
261 | int is_fake; |
262 | |
263 | /* |
264 | * Intel DebugStore bits |
265 | */ |
266 | struct debug_store *ds; |
267 | void *ds_pebs_vaddr; |
268 | void *ds_bts_vaddr; |
269 | u64 pebs_enabled; |
270 | int n_pebs; |
271 | int n_large_pebs; |
272 | int n_pebs_via_pt; |
273 | int pebs_output; |
274 | |
	/* Current superset of the events' hardware configuration */
276 | u64 pebs_data_cfg; |
277 | u64 active_pebs_data_cfg; |
278 | int pebs_record_size; |
279 | |
280 | /* Intel Fixed counter configuration */ |
281 | u64 fixed_ctrl_val; |
282 | u64 active_fixed_ctrl_val; |
283 | |
284 | /* |
285 | * Intel LBR bits |
286 | */ |
287 | int lbr_users; |
288 | int lbr_pebs_users; |
289 | struct perf_branch_stack lbr_stack; |
290 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; |
291 | u64 lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */ |
292 | union { |
293 | struct er_account *lbr_sel; |
294 | struct er_account *lbr_ctl; |
295 | }; |
296 | u64 br_sel; |
297 | void *last_task_ctx; |
298 | int last_log_id; |
299 | int lbr_select; |
300 | void *lbr_xsave; |
301 | |
302 | /* |
303 | * Intel host/guest exclude bits |
304 | */ |
305 | u64 intel_ctrl_guest_mask; |
306 | u64 intel_ctrl_host_mask; |
307 | struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX]; |
308 | |
309 | /* |
310 | * Intel checkpoint mask |
311 | */ |
312 | u64 intel_cp_status; |
313 | |
314 | /* |
315 | * manage shared (per-core, per-cpu) registers |
316 | * used on Intel NHM/WSM/SNB |
317 | */ |
318 | struct intel_shared_regs *shared_regs; |
319 | /* |
	 * manage exclusive counter access between hyperthreads
321 | */ |
322 | struct event_constraint *constraint_list; /* in enable order */ |
323 | struct intel_excl_cntrs *excl_cntrs; |
324 | int excl_thread_id; /* 0 or 1 */ |
325 | |
326 | /* |
327 | * SKL TSX_FORCE_ABORT shadow |
328 | */ |
329 | u64 tfa_shadow; |
330 | |
331 | /* |
332 | * Perf Metrics |
333 | */ |
334 | /* number of accepted metrics events */ |
335 | int n_metric; |
336 | |
337 | /* |
338 | * AMD specific bits |
339 | */ |
340 | struct amd_nb *amd_nb; |
341 | int brs_active; /* BRS is enabled */ |
342 | |
343 | /* Inverted mask of bits to clear in the perf_ctr ctrl registers */ |
344 | u64 perf_ctr_virt_mask; |
345 | int n_pair; /* Large increment events */ |
346 | |
347 | void *kfree_on_online[X86_PERF_KFREE_MAX]; |
348 | |
349 | struct pmu *pmu; |
350 | }; |
351 | |
352 | #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \ |
353 | { .idxmsk64 = (n) }, \ |
354 | .code = (c), \ |
355 | .size = (e) - (c), \ |
356 | .cmask = (m), \ |
357 | .weight = (w), \ |
358 | .overlap = (o), \ |
359 | .flags = f, \ |
360 | } |
361 | |
362 | #define __EVENT_CONSTRAINT(c, n, m, w, o, f) \ |
363 | __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f) |
364 | |
365 | #define EVENT_CONSTRAINT(c, n, m) \ |
366 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0) |
367 | |
368 | /* |
369 | * The constraint_match() function only works for 'simple' event codes |
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
371 | */ |
372 | #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \ |
373 | __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0) |
374 | |
375 | #define INTEL_EXCLEVT_CONSTRAINT(c, n) \ |
376 | __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\ |
377 | 0, PERF_X86_EVENT_EXCL) |
378 | |
379 | /* |
380 | * The overlap flag marks event constraints with overlapping counter |
381 | * masks. This is the case if the counter mask of such an event is not |
382 | * a subset of any other counter mask of a constraint with an equal or |
383 | * higher weight, e.g.: |
384 | * |
385 | * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); |
386 | * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0); |
387 | * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0); |
388 | * |
389 | * The event scheduler may not select the correct counter in the first |
390 | * cycle because it needs to know which subsequent events will be |
391 | * scheduled. It may fail to schedule the events then. So we set the |
392 | * overlap flag for such constraints to give the scheduler a hint which |
393 | * events to select for counter rescheduling. |
394 | * |
395 | * Care must be taken as the rescheduling algorithm is O(n!) which |
396 | * will increase scheduling cycles for an over-committed system |
397 | * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros |
 * and their counter masks must be kept to a minimum.
399 | */ |
400 | #define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ |
401 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0) |
402 | |
403 | /* |
404 | * Constraint on the Event code. |
405 | */ |
406 | #define INTEL_EVENT_CONSTRAINT(c, n) \ |
407 | EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) |
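
/*
 * Example (illustrative values): INTEL_EVENT_CONSTRAINT(0x2e, 0x3)
 * constrains the event with code 0x2e (any umask) to generic counters
 * 0 and 1, since only ARCH_PERFMON_EVENTSEL_EVENT is compared.
 */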
408 | |
409 | /* |
410 | * Constraint on a range of Event codes |
411 | */ |
412 | #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \ |
413 | EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT) |
414 | |
415 | /* |
416 | * Constraint on the Event code + UMask + fixed-mask |
417 | * |
 * Filter mask to validate fixed counter events.
 * The following filters disqualify an event from using fixed counters:
420 | * - inv |
421 | * - edge |
422 | * - cnt-mask |
423 | * - in_tx |
424 | * - in_tx_checkpointed |
425 | * The other filters are supported by fixed counters. |
426 | * The any-thread option is supported starting with v3. |
427 | */ |
428 | #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED) |
429 | #define FIXED_EVENT_CONSTRAINT(c, n) \ |
430 | EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS) |
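
/*
 * e.g. FIXED_EVENT_CONSTRAINT(0x00c0, 0) pins the architectural
 * INST_RETIRED.ANY encoding to fixed counter 0, i.e. bit 32
 * (INTEL_PMC_IDX_FIXED + 0) of the counter index mask.
 */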
431 | |
432 | /* |
433 | * The special metric counters do not actually exist. They are calculated from |
434 | * the combination of the FxCtr3 + MSR_PERF_METRICS. |
435 | * |
436 | * The special metric counters are mapped to a dummy offset for the scheduler. |
437 | * The sharing between multiple users of the same metric without multiplexing |
438 | * is not allowed, even though the hardware supports that in principle. |
439 | */ |
440 | |
441 | #define METRIC_EVENT_CONSTRAINT(c, n) \ |
442 | EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)), \ |
443 | INTEL_ARCH_EVENT_MASK) |
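
/*
 * e.g. METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0) binds the
 * retiring pseudo-event to the first dummy metric slot above
 * INTEL_PMC_IDX_METRIC_BASE (illustrative; slot numbers follow the
 * metric order in MSR_PERF_METRICS).
 */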
444 | |
445 | /* |
446 | * Constraint on the Event code + UMask |
447 | */ |
448 | #define INTEL_UEVENT_CONSTRAINT(c, n) \ |
449 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) |
450 | |
451 | /* Constraint on specific umask bit only + event */ |
452 | #define INTEL_UBIT_EVENT_CONSTRAINT(c, n) \ |
453 | EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c)) |
454 | |
455 | /* Like UEVENT_CONSTRAINT, but match flags too */ |
456 | #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \ |
457 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) |
458 | |
459 | #define INTEL_EXCLUEVT_CONSTRAINT(c, n) \ |
460 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ |
461 | HWEIGHT(n), 0, PERF_X86_EVENT_EXCL) |
462 | |
463 | #define INTEL_PLD_CONSTRAINT(c, n) \ |
464 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
465 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT) |
466 | |
467 | #define INTEL_PSD_CONSTRAINT(c, n) \ |
468 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
469 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT) |
470 | |
471 | #define INTEL_PST_CONSTRAINT(c, n) \ |
472 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
473 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) |
474 | |
475 | #define INTEL_HYBRID_LAT_CONSTRAINT(c, n) \ |
476 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
477 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID) |
478 | |
479 | /* Event constraint, but match on all event flags too. */ |
480 | #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \ |
481 | EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS) |
482 | |
483 | #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \ |
484 | EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS) |
485 | |
486 | /* Check only flags, but allow all event/umask */ |
487 | #define INTEL_ALL_EVENT_CONSTRAINT(code, n) \ |
488 | EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS) |
489 | |
490 | /* Check flags and event code, and set the HSW store flag */ |
491 | #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \ |
492 | __EVENT_CONSTRAINT(code, n, \ |
493 | ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ |
494 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) |
495 | |
496 | /* Check flags and event code, and set the HSW load flag */ |
497 | #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \ |
498 | __EVENT_CONSTRAINT(code, n, \ |
499 | ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ |
500 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) |
501 | |
502 | #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \ |
503 | __EVENT_CONSTRAINT_RANGE(code, end, n, \ |
504 | ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ |
505 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) |
506 | |
507 | #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \ |
508 | __EVENT_CONSTRAINT(code, n, \ |
509 | ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ |
510 | HWEIGHT(n), 0, \ |
511 | PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL) |
512 | |
513 | /* Check flags and event code/umask, and set the HSW store flag */ |
514 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \ |
515 | __EVENT_CONSTRAINT(code, n, \ |
516 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
517 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) |
518 | |
519 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \ |
520 | __EVENT_CONSTRAINT(code, n, \ |
521 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
522 | HWEIGHT(n), 0, \ |
523 | PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL) |
524 | |
525 | /* Check flags and event code/umask, and set the HSW load flag */ |
526 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \ |
527 | __EVENT_CONSTRAINT(code, n, \ |
528 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
529 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) |
530 | |
531 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \ |
532 | __EVENT_CONSTRAINT(code, n, \ |
533 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
534 | HWEIGHT(n), 0, \ |
535 | PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL) |
536 | |
537 | /* Check flags and event code/umask, and set the HSW N/A flag */ |
538 | #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \ |
539 | __EVENT_CONSTRAINT(code, n, \ |
540 | INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ |
541 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW) |
542 | |
543 | |
544 | /* |
545 | * We define the end marker as having a weight of -1 |
546 | * to enable blacklisting of events using a counter bitmask |
547 | * of zero and thus a weight of zero. |
548 | * The end marker has a weight that cannot possibly be |
549 | * obtained from counting the bits in the bitmask. |
550 | */ |
551 | #define EVENT_CONSTRAINT_END { .weight = -1 } |
552 | |
553 | /* |
554 | * Check for end marker with weight == -1 |
555 | */ |
556 | #define for_each_event_constraint(e, c) \ |
557 | for ((e) = (c); (e)->weight != -1; (e)++) |
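
/*
 * Typical use (illustrative), e.g. when looking up a constraint for an
 * event in a model-specific table:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if (constraint_match(c, event->hw.config))
 *			return c;
 *	}
 */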
558 | |
559 | /* |
560 | * Extra registers for specific events. |
561 | * |
562 | * Some events need large masks and require external MSRs. |
563 | * Those extra MSRs end up being shared for all events on |
564 | * a PMU and sometimes between PMU of sibling HT threads. |
565 | * In either case, the kernel needs to handle conflicting |
566 | * accesses to those extra, shared, regs. The data structure |
567 | * to manage those registers is stored in cpu_hw_event. |
568 | */ |
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};
577 | |
#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
579 | .event = (e), \ |
580 | .msr = (ms), \ |
581 | .config_mask = (m), \ |
582 | .valid_mask = (vm), \ |
583 | .idx = EXTRA_REG_##i, \ |
584 | .extra_msr_access = true, \ |
585 | } |
586 | |
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
588 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) |
589 | |
#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx)	\
591 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \ |
592 | ARCH_PERFMON_EVENTSEL_UMASK, vm, idx) |
593 | |
#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c)	\
595 | INTEL_UEVENT_EXTRA_REG(c, \ |
596 | MSR_PEBS_LD_LAT_THRESHOLD, \ |
597 | 0xffff, \ |
598 | LDLAT) |
599 | |
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
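
/*
 * Model-specific tables are built from the helpers above and terminated
 * with EVENT_EXTRA_END, e.g. (illustrative table name and values):
 *
 *	static struct extra_reg intel_foo_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 */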
601 | |
602 | union perf_capabilities { |
603 | struct { |
604 | u64 lbr_format:6; |
605 | u64 pebs_trap:1; |
606 | u64 pebs_arch_reg:1; |
607 | u64 pebs_format:4; |
608 | u64 smm_freeze:1; |
609 | /* |
610 | * PMU supports separate counter range for writing |
611 | * values > 32bit. |
612 | */ |
613 | u64 full_width_write:1; |
614 | u64 pebs_baseline:1; |
615 | u64 perf_metrics:1; |
616 | u64 pebs_output_pt_available:1; |
617 | u64 pebs_timing_info:1; |
618 | u64 anythread_deprecated:1; |
619 | }; |
620 | u64 capabilities; |
621 | }; |
622 | |
623 | struct x86_pmu_quirk { |
624 | struct x86_pmu_quirk *next; |
625 | void (*func)(void); |
626 | }; |
627 | |
628 | union x86_pmu_config { |
629 | struct { |
630 | u64 event:8, |
631 | umask:8, |
632 | usr:1, |
633 | os:1, |
634 | edge:1, |
635 | pc:1, |
636 | interrupt:1, |
637 | __reserved1:1, |
638 | en:1, |
639 | inv:1, |
640 | cmask:8, |
641 | event2:4, |
642 | __reserved2:4, |
643 | go:1, |
644 | ho:1; |
645 | } bits; |
646 | u64 value; |
647 | }; |
648 | |
649 | #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value |
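
/*
 * e.g. X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16) builds a
 * raw EVENTSEL value from named bit-fields (illustrative values).
 */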
650 | |
651 | enum { |
652 | x86_lbr_exclusive_lbr, |
653 | x86_lbr_exclusive_bts, |
654 | x86_lbr_exclusive_pt, |
655 | x86_lbr_exclusive_max, |
656 | }; |
657 | |
658 | #define PERF_PEBS_DATA_SOURCE_MAX 0x10 |
659 | #define PERF_PEBS_DATA_SOURCE_MASK (PERF_PEBS_DATA_SOURCE_MAX - 1) |
660 | |
661 | enum hybrid_cpu_type { |
662 | HYBRID_INTEL_NONE, |
663 | HYBRID_INTEL_ATOM = 0x20, |
664 | HYBRID_INTEL_CORE = 0x40, |
665 | }; |
666 | |
667 | enum hybrid_pmu_type { |
668 | not_hybrid, |
669 | hybrid_small = BIT(0), |
670 | hybrid_big = BIT(1), |
671 | |
672 | hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */ |
673 | }; |
674 | |
675 | #define X86_HYBRID_PMU_ATOM_IDX 0 |
676 | #define X86_HYBRID_PMU_CORE_IDX 1 |
677 | |
678 | #define X86_HYBRID_NUM_PMUS 2 |
679 | |
680 | struct x86_hybrid_pmu { |
681 | struct pmu pmu; |
682 | const char *name; |
683 | enum hybrid_pmu_type pmu_type; |
684 | cpumask_t supported_cpus; |
685 | union perf_capabilities intel_cap; |
686 | u64 intel_ctrl; |
687 | int max_pebs_events; |
688 | int num_counters; |
689 | int num_counters_fixed; |
690 | struct event_constraint unconstrained; |
691 | |
692 | u64 hw_cache_event_ids |
693 | [PERF_COUNT_HW_CACHE_MAX] |
694 | [PERF_COUNT_HW_CACHE_OP_MAX] |
695 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
	u64				hw_cache_extra_regs
697 | [PERF_COUNT_HW_CACHE_MAX] |
698 | [PERF_COUNT_HW_CACHE_OP_MAX] |
699 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
700 | struct event_constraint *event_constraints; |
701 | struct event_constraint *pebs_constraints; |
	struct extra_reg		*extra_regs;
703 | |
704 | unsigned int late_ack :1, |
705 | mid_ack :1, |
706 | enabled_ack :1; |
707 | |
708 | u64 pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX]; |
709 | }; |
710 | |
711 | static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu) |
712 | { |
713 | return container_of(pmu, struct x86_hybrid_pmu, pmu); |
714 | } |
715 | |
716 | extern struct static_key_false perf_is_hybrid; |
717 | #define is_hybrid() static_branch_unlikely(&perf_is_hybrid) |
718 | |
719 | #define hybrid(_pmu, _field) \ |
720 | (*({ \ |
721 | typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \ |
722 | \ |
723 | if (is_hybrid() && (_pmu)) \ |
724 | __Fp = &hybrid_pmu(_pmu)->_field; \ |
725 | \ |
726 | __Fp; \ |
727 | })) |
728 | |
729 | #define hybrid_var(_pmu, _var) \ |
730 | (*({ \ |
731 | typeof(&_var) __Fp = &_var; \ |
732 | \ |
733 | if (is_hybrid() && (_pmu)) \ |
734 | __Fp = &hybrid_pmu(_pmu)->_var; \ |
735 | \ |
736 | __Fp; \ |
737 | })) |
738 | |
739 | #define hybrid_bit(_pmu, _field) \ |
740 | ({ \ |
741 | bool __Fp = x86_pmu._field; \ |
742 | \ |
743 | if (is_hybrid() && (_pmu)) \ |
744 | __Fp = hybrid_pmu(_pmu)->_field; \ |
745 | \ |
746 | __Fp; \ |
747 | }) |
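
/*
 * Example (illustrative): hybrid(cpuc->pmu, event_constraints) resolves to
 * the per-PMU field on hybrid systems and falls back to the global
 * x86_pmu.event_constraints otherwise. hybrid() and hybrid_var() expand to
 * lvalues, so they can be assigned to as well as read.
 */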
748 | |
749 | /* |
750 | * struct x86_pmu - generic x86 pmu |
751 | */ |
752 | struct x86_pmu { |
753 | /* |
754 | * Generic x86 PMC bits |
755 | */ |
756 | const char *name; |
757 | int version; |
758 | int (*handle_irq)(struct pt_regs *); |
759 | void (*disable_all)(void); |
760 | void (*enable_all)(int added); |
761 | void (*enable)(struct perf_event *); |
762 | void (*disable)(struct perf_event *); |
763 | void (*assign)(struct perf_event *event, int idx); |
764 | void (*add)(struct perf_event *); |
765 | void (*del)(struct perf_event *); |
766 | void (*read)(struct perf_event *event); |
767 | int (*set_period)(struct perf_event *event); |
768 | u64 (*update)(struct perf_event *event); |
769 | int (*hw_config)(struct perf_event *event); |
770 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); |
771 | unsigned eventsel; |
772 | unsigned perfctr; |
773 | int (*addr_offset)(int index, bool eventsel); |
774 | int (*rdpmc_index)(int index); |
775 | u64 (*event_map)(int); |
776 | int max_events; |
777 | int num_counters; |
778 | int num_counters_fixed; |
779 | int cntval_bits; |
780 | u64 cntval_mask; |
781 | union { |
782 | unsigned long events_maskl; |
783 | unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)]; |
784 | }; |
785 | int events_mask_len; |
786 | int apic; |
787 | u64 max_period; |
788 | struct event_constraint * |
789 | (*get_event_constraints)(struct cpu_hw_events *cpuc, |
790 | int idx, |
791 | struct perf_event *event); |
792 | |
793 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, |
794 | struct perf_event *event); |
795 | |
796 | void (*start_scheduling)(struct cpu_hw_events *cpuc); |
797 | |
798 | void (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr); |
799 | |
800 | void (*stop_scheduling)(struct cpu_hw_events *cpuc); |
801 | |
802 | struct event_constraint *event_constraints; |
803 | struct x86_pmu_quirk *quirks; |
804 | void (*limit_period)(struct perf_event *event, s64 *l); |
805 | |
806 | /* PMI handler bits */ |
807 | unsigned int late_ack :1, |
808 | mid_ack :1, |
809 | enabled_ack :1; |
810 | /* |
811 | * sysfs attrs |
812 | */ |
813 | int attr_rdpmc_broken; |
814 | int attr_rdpmc; |
815 | struct attribute **format_attrs; |
816 | |
817 | ssize_t (*events_sysfs_show)(char *page, u64 config); |
818 | const struct attribute_group **attr_update; |
819 | |
820 | unsigned long attr_freeze_on_smi; |
821 | |
822 | /* |
823 | * CPU Hotplug hooks |
824 | */ |
825 | int (*cpu_prepare)(int cpu); |
826 | void (*cpu_starting)(int cpu); |
827 | void (*cpu_dying)(int cpu); |
828 | void (*cpu_dead)(int cpu); |
829 | |
830 | void (*check_microcode)(void); |
831 | void (*sched_task)(struct perf_event_pmu_context *pmu_ctx, |
832 | bool sched_in); |
833 | |
834 | /* |
835 | * Intel Arch Perfmon v2+ |
836 | */ |
837 | u64 intel_ctrl; |
838 | union perf_capabilities intel_cap; |
839 | |
840 | /* |
841 | * Intel DebugStore bits |
842 | */ |
843 | unsigned int bts :1, |
844 | bts_active :1, |
845 | pebs :1, |
846 | pebs_active :1, |
847 | pebs_broken :1, |
848 | pebs_prec_dist :1, |
849 | pebs_no_tlb :1, |
850 | pebs_no_isolation :1, |
851 | pebs_block :1, |
852 | pebs_ept :1; |
853 | int pebs_record_size; |
854 | int pebs_buffer_size; |
855 | int max_pebs_events; |
856 | void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data); |
857 | struct event_constraint *pebs_constraints; |
858 | void (*pebs_aliases)(struct perf_event *event); |
859 | u64 (*pebs_latency_data)(struct perf_event *event, u64 status); |
860 | unsigned long large_pebs_flags; |
861 | u64 rtm_abort_event; |
862 | u64 pebs_capable; |
863 | |
864 | /* |
865 | * Intel LBR |
866 | */ |
867 | unsigned int lbr_tos, lbr_from, lbr_to, |
868 | lbr_info, lbr_nr; /* LBR base regs and size */ |
869 | union { |
870 | u64 lbr_sel_mask; /* LBR_SELECT valid bits */ |
871 | u64 lbr_ctl_mask; /* LBR_CTL valid bits */ |
872 | }; |
873 | union { |
874 | const int *lbr_sel_map; /* lbr_select mappings */ |
875 | int *lbr_ctl_map; /* LBR_CTL mappings */ |
876 | }; |
877 | bool lbr_double_abort; /* duplicated lbr aborts */ |
878 | bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */ |
879 | |
880 | unsigned int lbr_has_info:1; |
881 | unsigned int lbr_has_tsx:1; |
882 | unsigned int lbr_from_flags:1; |
883 | unsigned int lbr_to_cycles:1; |
884 | |
885 | /* |
886 | * Intel Architectural LBR CPUID Enumeration |
887 | */ |
888 | unsigned int lbr_depth_mask:8; |
889 | unsigned int lbr_deep_c_reset:1; |
890 | unsigned int lbr_lip:1; |
891 | unsigned int lbr_cpl:1; |
892 | unsigned int lbr_filter:1; |
893 | unsigned int lbr_call_stack:1; |
894 | unsigned int lbr_mispred:1; |
895 | unsigned int lbr_timed_lbr:1; |
896 | unsigned int lbr_br_type:1; |
897 | unsigned int lbr_counters:4; |
898 | |
899 | void (*lbr_reset)(void); |
900 | void (*lbr_read)(struct cpu_hw_events *cpuc); |
901 | void (*lbr_save)(void *ctx); |
902 | void (*lbr_restore)(void *ctx); |
903 | |
904 | /* |
905 | * Intel PT/LBR/BTS are exclusive |
906 | */ |
907 | atomic_t lbr_exclusive[x86_lbr_exclusive_max]; |
908 | |
909 | /* |
910 | * Intel perf metrics |
911 | */ |
912 | int num_topdown_events; |
913 | |
914 | /* |
915 | * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data) |
916 | * switch helper to bridge calls from perf/core to perf/x86. |
917 | * See struct pmu::swap_task_ctx() usage for examples; |
918 | */ |
919 | void (*swap_task_ctx)(struct perf_event_pmu_context *prev_epc, |
920 | struct perf_event_pmu_context *next_epc); |
921 | |
922 | /* |
923 | * AMD bits |
924 | */ |
925 | unsigned int amd_nb_constraints : 1; |
926 | u64 perf_ctr_pair_en; |
927 | |
928 | /* |
929 | * Extra registers for events |
930 | */ |
	struct extra_reg *extra_regs;
932 | unsigned int flags; |
933 | |
934 | /* |
935 | * Intel host/guest support (KVM) |
936 | */ |
937 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr, void *data); |
938 | |
939 | /* |
940 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. |
941 | */ |
942 | int (*check_period) (struct perf_event *event, u64 period); |
943 | |
944 | int (*aux_output_match) (struct perf_event *event); |
945 | |
946 | void (*filter)(struct pmu *pmu, int cpu, bool *ret); |
947 | /* |
948 | * Hybrid support |
949 | * |
950 | * Most PMU capabilities are the same among different hybrid PMUs. |
951 | * The global x86_pmu saves the architecture capabilities, which |
952 | * are available for all PMUs. The hybrid_pmu only includes the |
953 | * unique capabilities. |
954 | */ |
955 | int num_hybrid_pmus; |
956 | struct x86_hybrid_pmu *hybrid_pmu; |
957 | enum hybrid_cpu_type (*get_hybrid_cpu_type) (void); |
958 | }; |
959 | |
960 | struct x86_perf_task_context_opt { |
961 | int lbr_callstack_users; |
962 | int lbr_stack_state; |
963 | int log_id; |
964 | }; |
965 | |
966 | struct x86_perf_task_context { |
967 | u64 lbr_sel; |
968 | int tos; |
969 | int valid_lbrs; |
970 | struct x86_perf_task_context_opt opt; |
971 | struct lbr_entry lbr[MAX_LBR_ENTRIES]; |
972 | }; |
973 | |
974 | struct x86_perf_task_context_arch_lbr { |
975 | struct x86_perf_task_context_opt opt; |
976 | struct lbr_entry entries[]; |
977 | }; |
978 | |
979 | /* |
980 | * Add padding to guarantee the 64-byte alignment of the state buffer. |
981 | * |
982 | * The structure is dynamically allocated. The size of the LBR state may vary |
983 | * based on the number of LBR registers. |
984 | * |
985 | * Do not put anything after the LBR state. |
986 | */ |
987 | struct x86_perf_task_context_arch_lbr_xsave { |
988 | struct x86_perf_task_context_opt opt; |
989 | |
990 | union { |
991 | struct xregs_state xsave; |
992 | struct { |
993 | struct fxregs_state i387; |
			struct xstate_header		header;
995 | struct arch_lbr_state lbr; |
996 | } __attribute__ ((packed, aligned (XSAVE_ALIGNMENT))); |
997 | }; |
998 | }; |
999 | |
1000 | #define x86_add_quirk(func_) \ |
1001 | do { \ |
1002 | static struct x86_pmu_quirk __quirk __initdata = { \ |
1003 | .func = func_, \ |
1004 | }; \ |
1005 | __quirk.next = x86_pmu.quirks; \ |
1006 | x86_pmu.quirks = &__quirk; \ |
1007 | } while (0) |
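
/*
 * Typically called from a model-specific init path, e.g. (illustrative):
 *
 *	x86_add_quirk(intel_clovertown_quirk);
 *
 * The registered quirk functions are invoked once during PMU initialization.
 */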
1008 | |
1009 | /* |
1010 | * x86_pmu flags |
1011 | */ |
1012 | #define PMU_FL_NO_HT_SHARING 0x1 /* no hyper-threading resource sharing */ |
1013 | #define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */ |
1014 | #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ |
1015 | #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ |
1016 | #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ |
1017 | #define PMU_FL_TFA 0x20 /* deal with TSX force abort */ |
1018 | #define PMU_FL_PAIR 0x40 /* merge counters for large incr. events */ |
1019 | #define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */ |
1020 | #define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */ |
1021 | #define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */ |
1022 | #define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */ |
1023 | |
1024 | #define EVENT_VAR(_id) event_attr_##_id |
1025 | #define EVENT_PTR(_id) &event_attr_##_id.attr.attr |
1026 | |
1027 | #define EVENT_ATTR(_name, _id) \ |
1028 | static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ |
1029 | .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ |
1030 | .id = PERF_COUNT_HW_##_id, \ |
1031 | .event_str = NULL, \ |
1032 | }; |
1033 | |
1034 | #define EVENT_ATTR_STR(_name, v, str) \ |
1035 | static struct perf_pmu_events_attr event_attr_##v = { \ |
1036 | .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ |
1037 | .id = 0, \ |
1038 | .event_str = str, \ |
1039 | }; |
1040 | |
1041 | #define EVENT_ATTR_STR_HT(_name, v, noht, ht) \ |
1042 | static struct perf_pmu_events_ht_attr event_attr_##v = { \ |
1043 | .attr = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\ |
1044 | .id = 0, \ |
1045 | .event_str_noht = noht, \ |
1046 | .event_str_ht = ht, \ |
1047 | } |
1048 | |
1049 | #define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu) \ |
1050 | static struct perf_pmu_events_hybrid_attr event_attr_##v = { \ |
1051 | .attr = __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\ |
1052 | .id = 0, \ |
1053 | .event_str = str, \ |
1054 | .pmu_type = _pmu, \ |
1055 | } |
1056 | |
1057 | #define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr) |
1058 | |
1059 | #define FORMAT_ATTR_HYBRID(_name, _pmu) \ |
1060 | static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\ |
1061 | .attr = __ATTR_RO(_name), \ |
1062 | .pmu_type = _pmu, \ |
1063 | } |
1064 | |
1065 | struct pmu *x86_get_pmu(unsigned int cpu); |
1066 | extern struct x86_pmu x86_pmu __read_mostly; |
1067 | |
1068 | DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period); |
1069 | DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update); |
1070 | |
1071 | static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) |
1072 | { |
1073 | if (static_cpu_has(X86_FEATURE_ARCH_LBR)) |
1074 | return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt; |
1075 | |
1076 | return &((struct x86_perf_task_context *)ctx)->opt; |
1077 | } |
1078 | |
1079 | static inline bool x86_pmu_has_lbr_callstack(void) |
1080 | { |
1081 | return x86_pmu.lbr_sel_map && |
1082 | x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0; |
1083 | } |
1084 | |
1085 | DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); |
1086 | DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
1087 | |
1088 | int x86_perf_event_set_period(struct perf_event *event); |
1089 | |
1090 | /* |
1091 | * Generalized hw caching related hw_event table, filled |
1092 | * in on a per model basis. A value of 0 means |
1093 | * 'not supported', -1 means 'hw_event makes no sense on |
1094 | * this CPU', any other value means the raw hw_event |
1095 | * ID. |
1096 | */ |
1097 | |
1098 | #define C(x) PERF_COUNT_HW_CACHE_##x |
1099 | |
1100 | extern u64 __read_mostly hw_cache_event_ids |
1101 | [PERF_COUNT_HW_CACHE_MAX] |
1102 | [PERF_COUNT_HW_CACHE_OP_MAX] |
1103 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
extern u64 __read_mostly hw_cache_extra_regs
1105 | [PERF_COUNT_HW_CACHE_MAX] |
1106 | [PERF_COUNT_HW_CACHE_OP_MAX] |
1107 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
1108 | |
1109 | u64 x86_perf_event_update(struct perf_event *event); |
1110 | |
1111 | static inline unsigned int x86_pmu_config_addr(int index) |
1112 | { |
1113 | return x86_pmu.eventsel + (x86_pmu.addr_offset ? |
1114 | x86_pmu.addr_offset(index, true) : index); |
1115 | } |
1116 | |
1117 | static inline unsigned int x86_pmu_event_addr(int index) |
1118 | { |
1119 | return x86_pmu.perfctr + (x86_pmu.addr_offset ? |
1120 | x86_pmu.addr_offset(index, false) : index); |
1121 | } |
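
/*
 * Without an ->addr_offset callback the config/counter MSRs are assumed to
 * be laid out contiguously, so e.g. index 1 maps to x86_pmu.eventsel + 1 and
 * x86_pmu.perfctr + 1; PMUs with interleaved or rebased MSR layouts provide
 * ->addr_offset to remap the index.
 */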
1122 | |
1123 | static inline int x86_pmu_rdpmc_index(int index) |
1124 | { |
1125 | return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; |
1126 | } |
1127 | |
1128 | bool check_hw_exists(struct pmu *pmu, int num_counters, |
1129 | int num_counters_fixed); |
1130 | |
1131 | int x86_add_exclusive(unsigned int what); |
1132 | |
1133 | void x86_del_exclusive(unsigned int what); |
1134 | |
1135 | int x86_reserve_hardware(void); |
1136 | |
1137 | void x86_release_hardware(void); |
1138 | |
1139 | int x86_pmu_max_precise(void); |
1140 | |
1141 | void hw_perf_lbr_event_destroy(struct perf_event *event); |
1142 | |
1143 | int x86_setup_perfctr(struct perf_event *event); |
1144 | |
1145 | int x86_pmu_hw_config(struct perf_event *event); |
1146 | |
1147 | void x86_pmu_disable_all(void); |
1148 | |
1149 | static inline bool has_amd_brs(struct hw_perf_event *hwc) |
1150 | { |
1151 | return hwc->flags & PERF_X86_EVENT_AMD_BRS; |
1152 | } |
1153 | |
1154 | static inline bool is_counter_pair(struct hw_perf_event *hwc) |
1155 | { |
1156 | return hwc->flags & PERF_X86_EVENT_PAIR; |
1157 | } |
1158 | |
1159 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, |
1160 | u64 enable_mask) |
1161 | { |
1162 | u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); |
1163 | |
1164 | if (hwc->extra_reg.reg) |
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
1166 | |
1167 | /* |
1168 | * Add enabled Merge event on next counter |
1169 | * if large increment event being enabled on this counter |
1170 | */ |
1171 | if (is_counter_pair(hwc)) |
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
1173 | |
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
1175 | } |
1176 | |
1177 | void x86_pmu_enable_all(int added); |
1178 | |
1179 | int perf_assign_events(struct event_constraint **constraints, int n, |
1180 | int wmin, int wmax, int gpmax, int *assign); |
1181 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); |
1182 | |
1183 | void x86_pmu_stop(struct perf_event *event, int flags); |
1184 | |
1185 | static inline void x86_pmu_disable_event(struct perf_event *event) |
1186 | { |
1187 | u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); |
1188 | struct hw_perf_event *hwc = &event->hw; |
1189 | |
	wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
1191 | |
1192 | if (is_counter_pair(hwc)) |
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
1194 | } |
1195 | |
1196 | void x86_pmu_enable_event(struct perf_event *event); |
1197 | |
1198 | int x86_pmu_handle_irq(struct pt_regs *regs); |
1199 | |
1200 | void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, |
1201 | u64 intel_ctrl); |
1202 | |
1203 | extern struct event_constraint emptyconstraint; |
1204 | |
1205 | extern struct event_constraint unconstrained; |
1206 | |
1207 | static inline bool kernel_ip(unsigned long ip) |
1208 | { |
1209 | #ifdef CONFIG_X86_32 |
1210 | return ip > PAGE_OFFSET; |
1211 | #else |
1212 | return (long)ip < 0; |
1213 | #endif |
1214 | } |
1215 | |
1216 | /* |
1217 | * Not all PMUs provide the right context information to place the reported IP |
1218 | * into full context. Specifically segment registers are typically not |
1219 | * supplied. |
1220 | * |
1221 | * Assuming the address is a linear address (it is for IBS), we fake the CS and |
1222 | * vm86 mode using the known zero-based code segment and 'fix up' the registers |
1223 | * to reflect this. |
1224 | * |
1225 | * Intel PEBS/LBR appear to typically provide the effective address, nothing |
1226 | * much we can do about that but pray and treat it like a linear address. |
1227 | */ |
1228 | static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) |
1229 | { |
1230 | regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS; |
1231 | if (regs->flags & X86_VM_MASK) |
1232 | regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK); |
1233 | regs->ip = ip; |
1234 | } |
1235 | |
1236 | /* |
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
1239 | */ |
1240 | enum { |
1241 | X86_BR_NONE = 0, /* unknown */ |
1242 | |
1243 | X86_BR_USER = 1 << 0, /* branch target is user */ |
1244 | X86_BR_KERNEL = 1 << 1, /* branch target is kernel */ |
1245 | |
1246 | X86_BR_CALL = 1 << 2, /* call */ |
1247 | X86_BR_RET = 1 << 3, /* return */ |
1248 | X86_BR_SYSCALL = 1 << 4, /* syscall */ |
1249 | X86_BR_SYSRET = 1 << 5, /* syscall return */ |
1250 | X86_BR_INT = 1 << 6, /* sw interrupt */ |
1251 | X86_BR_IRET = 1 << 7, /* return from interrupt */ |
1252 | X86_BR_JCC = 1 << 8, /* conditional */ |
1253 | X86_BR_JMP = 1 << 9, /* jump */ |
1254 | X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ |
1255 | X86_BR_IND_CALL = 1 << 11,/* indirect calls */ |
1256 | X86_BR_ABORT = 1 << 12,/* transaction abort */ |
1257 | X86_BR_IN_TX = 1 << 13,/* in transaction */ |
1258 | X86_BR_NO_TX = 1 << 14,/* not in transaction */ |
1259 | X86_BR_ZERO_CALL = 1 << 15,/* zero length call */ |
1260 | X86_BR_CALL_STACK = 1 << 16,/* call stack */ |
1261 | X86_BR_IND_JMP = 1 << 17,/* indirect jump */ |
1262 | |
1263 | X86_BR_TYPE_SAVE = 1 << 18,/* indicate to save branch type */ |
1264 | |
1265 | }; |
1266 | |
1267 | #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) |
1268 | #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX) |
1269 | |
1270 | #define X86_BR_ANY \ |
1271 | (X86_BR_CALL |\ |
1272 | X86_BR_RET |\ |
1273 | X86_BR_SYSCALL |\ |
1274 | X86_BR_SYSRET |\ |
1275 | X86_BR_INT |\ |
1276 | X86_BR_IRET |\ |
1277 | X86_BR_JCC |\ |
1278 | X86_BR_JMP |\ |
1279 | X86_BR_IRQ |\ |
1280 | X86_BR_ABORT |\ |
1281 | X86_BR_IND_CALL |\ |
1282 | X86_BR_IND_JMP |\ |
1283 | X86_BR_ZERO_CALL) |
1284 | |
1285 | #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) |
1286 | |
1287 | #define X86_BR_ANY_CALL \ |
1288 | (X86_BR_CALL |\ |
1289 | X86_BR_IND_CALL |\ |
1290 | X86_BR_ZERO_CALL |\ |
1291 | X86_BR_SYSCALL |\ |
1292 | X86_BR_IRQ |\ |
1293 | X86_BR_INT) |
1294 | |
1295 | int common_branch_type(int type); |
1296 | int branch_type(unsigned long from, unsigned long to, int abort); |
1297 | int branch_type_fused(unsigned long from, unsigned long to, int abort, |
1298 | int *offset); |
1299 | |
1300 | ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); |
1301 | ssize_t intel_event_sysfs_show(char *page, u64 config); |
1302 | |
1303 | ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, |
1304 | char *page); |
1305 | ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr, |
1306 | char *page); |
1307 | ssize_t events_hybrid_sysfs_show(struct device *dev, |
1308 | struct device_attribute *attr, |
1309 | char *page); |
1310 | |
1311 | static inline bool fixed_counter_disabled(int i, struct pmu *pmu) |
1312 | { |
1313 | u64 intel_ctrl = hybrid(pmu, intel_ctrl); |
1314 | |
1315 | return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED)); |
1316 | } |
1317 | |
1318 | #ifdef CONFIG_CPU_SUP_AMD |
1319 | |
1320 | int amd_pmu_init(void); |
1321 | |
1322 | int amd_pmu_lbr_init(void); |
1323 | void amd_pmu_lbr_reset(void); |
1324 | void amd_pmu_lbr_read(void); |
1325 | void amd_pmu_lbr_add(struct perf_event *event); |
1326 | void amd_pmu_lbr_del(struct perf_event *event); |
1327 | void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); |
1328 | void amd_pmu_lbr_enable_all(void); |
1329 | void amd_pmu_lbr_disable_all(void); |
1330 | int amd_pmu_lbr_hw_config(struct perf_event *event); |
1331 | |
1332 | #ifdef CONFIG_PERF_EVENTS_AMD_BRS |
1333 | |
1334 | #define AMD_FAM19H_BRS_EVENT 0xc4 /* RETIRED_TAKEN_BRANCH_INSTRUCTIONS */ |
1335 | |
1336 | int amd_brs_init(void); |
1337 | void amd_brs_disable(void); |
1338 | void amd_brs_enable(void); |
1339 | void amd_brs_enable_all(void); |
1340 | void amd_brs_disable_all(void); |
1341 | void amd_brs_drain(void); |
1342 | void amd_brs_lopwr_init(void); |
1343 | int amd_brs_hw_config(struct perf_event *event); |
1344 | void amd_brs_reset(void); |
1345 | |
1346 | static inline void amd_pmu_brs_add(struct perf_event *event) |
1347 | { |
1348 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1349 | |
	perf_sched_cb_inc(event->pmu);
1351 | cpuc->lbr_users++; |
1352 | /* |
1353 | * No need to reset BRS because it is reset |
1354 | * on brs_enable() and it is saturating |
1355 | */ |
1356 | } |
1357 | |
1358 | static inline void amd_pmu_brs_del(struct perf_event *event) |
1359 | { |
1360 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1361 | |
1362 | cpuc->lbr_users--; |
1363 | WARN_ON_ONCE(cpuc->lbr_users < 0); |
1364 | |
	perf_sched_cb_dec(event->pmu);
1366 | } |
1367 | |
1368 | void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); |
1369 | #else |
1370 | static inline int amd_brs_init(void) |
1371 | { |
1372 | return 0; |
1373 | } |
1374 | static inline void amd_brs_disable(void) {} |
1375 | static inline void amd_brs_enable(void) {} |
1376 | static inline void amd_brs_drain(void) {} |
1377 | static inline void amd_brs_lopwr_init(void) {} |
1378 | static inline void amd_brs_disable_all(void) {} |
1379 | static inline int amd_brs_hw_config(struct perf_event *event) |
1380 | { |
1381 | return 0; |
1382 | } |
1383 | static inline void amd_brs_reset(void) {} |
1384 | |
1385 | static inline void amd_pmu_brs_add(struct perf_event *event) |
1386 | { |
1387 | } |
1388 | |
1389 | static inline void amd_pmu_brs_del(struct perf_event *event) |
1390 | { |
1391 | } |
1392 | |
1393 | static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) |
1394 | { |
1395 | } |
1396 | |
1397 | static inline void amd_brs_enable_all(void) |
1398 | { |
1399 | } |
1400 | |
1401 | #endif |
1402 | |
1403 | #else /* CONFIG_CPU_SUP_AMD */ |
1404 | |
1405 | static inline int amd_pmu_init(void) |
1406 | { |
1407 | return 0; |
1408 | } |
1409 | |
1410 | static inline int amd_brs_init(void) |
1411 | { |
1412 | return -EOPNOTSUPP; |
1413 | } |
1414 | |
1415 | static inline void amd_brs_drain(void) |
1416 | { |
1417 | } |
1418 | |
1419 | static inline void amd_brs_enable_all(void) |
1420 | { |
1421 | } |
1422 | |
1423 | static inline void amd_brs_disable_all(void) |
1424 | { |
1425 | } |
1426 | #endif /* CONFIG_CPU_SUP_AMD */ |
1427 | |
1428 | static inline int is_pebs_pt(struct perf_event *event) |
1429 | { |
1430 | return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT); |
1431 | } |
1432 | |
1433 | #ifdef CONFIG_CPU_SUP_INTEL |
1434 | |
1435 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
1436 | { |
1437 | struct hw_perf_event *hwc = &event->hw; |
1438 | unsigned int hw_event, bts_event; |
1439 | |
1440 | if (event->attr.freq) |
1441 | return false; |
1442 | |
1443 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; |
1444 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); |
1445 | |
1446 | return hw_event == bts_event && period == 1; |
1447 | } |
1448 | |
1449 | static inline bool intel_pmu_has_bts(struct perf_event *event) |
1450 | { |
1451 | struct hw_perf_event *hwc = &event->hw; |
1452 | |
	return intel_pmu_has_bts_period(event, hwc->sample_period);
1454 | } |
1455 | |
1456 | static __always_inline void __intel_pmu_pebs_disable_all(void) |
1457 | { |
	wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1459 | } |
1460 | |
1461 | static __always_inline void __intel_pmu_arch_lbr_disable(void) |
1462 | { |
	wrmsrl(MSR_ARCH_LBR_CTL, 0);
1464 | } |
1465 | |
1466 | static __always_inline void __intel_pmu_lbr_disable(void) |
1467 | { |
1468 | u64 debugctl; |
1469 | |
1470 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
1471 | debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); |
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1473 | } |
1474 | |
1475 | int intel_pmu_save_and_restart(struct perf_event *event); |
1476 | |
1477 | struct event_constraint * |
1478 | x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, |
1479 | struct perf_event *event); |
1480 | |
1481 | extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu); |
1482 | extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); |
1483 | |
1484 | int intel_pmu_init(void); |
1485 | |
1486 | void init_debug_store_on_cpu(int cpu); |
1487 | |
1488 | void fini_debug_store_on_cpu(int cpu); |
1489 | |
1490 | void release_ds_buffers(void); |
1491 | |
1492 | void reserve_ds_buffers(void); |
1493 | |
1494 | void release_lbr_buffers(void); |
1495 | |
1496 | void reserve_lbr_buffers(void); |
1497 | |
1498 | extern struct event_constraint bts_constraint; |
1499 | extern struct event_constraint vlbr_constraint; |
1500 | |
1501 | void intel_pmu_enable_bts(u64 config); |
1502 | |
1503 | void intel_pmu_disable_bts(void); |
1504 | |
1505 | int intel_pmu_drain_bts_buffer(void); |
1506 | |
1507 | u64 adl_latency_data_small(struct perf_event *event, u64 status); |
1508 | |
1509 | u64 mtl_latency_data_small(struct perf_event *event, u64 status); |
1510 | |
1511 | extern struct event_constraint intel_core2_pebs_event_constraints[]; |
1512 | |
1513 | extern struct event_constraint intel_atom_pebs_event_constraints[]; |
1514 | |
1515 | extern struct event_constraint intel_slm_pebs_event_constraints[]; |
1516 | |
1517 | extern struct event_constraint intel_glm_pebs_event_constraints[]; |
1518 | |
1519 | extern struct event_constraint intel_glp_pebs_event_constraints[]; |
1520 | |
1521 | extern struct event_constraint intel_grt_pebs_event_constraints[]; |
1522 | |
1523 | extern struct event_constraint intel_nehalem_pebs_event_constraints[]; |
1524 | |
1525 | extern struct event_constraint intel_westmere_pebs_event_constraints[]; |
1526 | |
1527 | extern struct event_constraint intel_snb_pebs_event_constraints[]; |
1528 | |
1529 | extern struct event_constraint intel_ivb_pebs_event_constraints[]; |
1530 | |
1531 | extern struct event_constraint intel_hsw_pebs_event_constraints[]; |
1532 | |
1533 | extern struct event_constraint intel_bdw_pebs_event_constraints[]; |
1534 | |
1535 | extern struct event_constraint intel_skl_pebs_event_constraints[]; |
1536 | |
1537 | extern struct event_constraint intel_icl_pebs_event_constraints[]; |
1538 | |
1539 | extern struct event_constraint intel_glc_pebs_event_constraints[]; |
1540 | |
1541 | struct event_constraint *intel_pebs_constraints(struct perf_event *event); |
1542 | |
1543 | void intel_pmu_pebs_add(struct perf_event *event); |
1544 | |
1545 | void intel_pmu_pebs_del(struct perf_event *event); |
1546 | |
1547 | void intel_pmu_pebs_enable(struct perf_event *event); |
1548 | |
1549 | void intel_pmu_pebs_disable(struct perf_event *event); |
1550 | |
1551 | void intel_pmu_pebs_enable_all(void); |
1552 | |
1553 | void intel_pmu_pebs_disable_all(void); |
1554 | |
1555 | void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); |
1556 | |
1557 | void intel_pmu_auto_reload_read(struct perf_event *event); |
1558 | |
1559 | void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); |
1560 | |
1561 | void intel_ds_init(void); |
1562 | |
1563 | void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, |
1564 | struct cpu_hw_events *cpuc, |
1565 | struct perf_event *event); |
1566 | |
1567 | void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, |
1568 | struct perf_event_pmu_context *next_epc); |
1569 | |
1570 | void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); |
1571 | |
1572 | u64 lbr_from_signext_quirk_wr(u64 val); |
1573 | |
1574 | void intel_pmu_lbr_reset(void); |
1575 | |
1576 | void intel_pmu_lbr_reset_32(void); |
1577 | |
1578 | void intel_pmu_lbr_reset_64(void); |
1579 | |
1580 | void intel_pmu_lbr_add(struct perf_event *event); |
1581 | |
1582 | void intel_pmu_lbr_del(struct perf_event *event); |
1583 | |
1584 | void intel_pmu_lbr_enable_all(bool pmi); |
1585 | |
1586 | void intel_pmu_lbr_disable_all(void); |
1587 | |
1588 | void intel_pmu_lbr_read(void); |
1589 | |
1590 | void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc); |
1591 | |
1592 | void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc); |
1593 | |
1594 | void intel_pmu_lbr_save(void *ctx); |
1595 | |
1596 | void intel_pmu_lbr_restore(void *ctx); |
1597 | |
1598 | void intel_pmu_lbr_init_core(void); |
1599 | |
1600 | void intel_pmu_lbr_init_nhm(void); |
1601 | |
1602 | void intel_pmu_lbr_init_atom(void); |
1603 | |
1604 | void intel_pmu_lbr_init_slm(void); |
1605 | |
1606 | void intel_pmu_lbr_init_snb(void); |
1607 | |
1608 | void intel_pmu_lbr_init_hsw(void); |
1609 | |
1610 | void intel_pmu_lbr_init_skl(void); |
1611 | |
1612 | void intel_pmu_lbr_init_knl(void); |
1613 | |
1614 | void intel_pmu_lbr_init(void); |
1615 | |
1616 | void intel_pmu_arch_lbr_init(void); |
1617 | |
1618 | void intel_pmu_pebs_data_source_nhm(void); |
1619 | |
1620 | void intel_pmu_pebs_data_source_skl(bool pmem); |
1621 | |
1622 | void intel_pmu_pebs_data_source_adl(void); |
1623 | |
1624 | void intel_pmu_pebs_data_source_grt(void); |
1625 | |
1626 | void intel_pmu_pebs_data_source_mtl(void); |
1627 | |
1628 | void intel_pmu_pebs_data_source_cmt(void); |
1629 | |
1630 | int intel_pmu_setup_lbr_filter(struct perf_event *event); |
1631 | |
1632 | void intel_pt_interrupt(void); |
1633 | |
1634 | int intel_bts_interrupt(void); |
1635 | |
1636 | void intel_bts_enable_local(void); |
1637 | |
1638 | void intel_bts_disable_local(void); |
1639 | |
1640 | int p4_pmu_init(void); |
1641 | |
1642 | int p6_pmu_init(void); |
1643 | |
1644 | int knc_pmu_init(void); |
1645 | |
1646 | static inline int is_ht_workaround_enabled(void) |
1647 | { |
1648 | return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); |
1649 | } |
1650 | |
1651 | #else /* CONFIG_CPU_SUP_INTEL */ |
1652 | |
1653 | static inline void reserve_ds_buffers(void) |
1654 | { |
1655 | } |
1656 | |
1657 | static inline void release_ds_buffers(void) |
1658 | { |
1659 | } |
1660 | |
1661 | static inline void release_lbr_buffers(void) |
1662 | { |
1663 | } |
1664 | |
1665 | static inline void reserve_lbr_buffers(void) |
1666 | { |
1667 | } |
1668 | |
1669 | static inline int intel_pmu_init(void) |
1670 | { |
1671 | return 0; |
1672 | } |
1673 | |
1674 | static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) |
1675 | { |
1676 | return 0; |
1677 | } |
1678 | |
1679 | static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc) |
1680 | { |
1681 | } |
1682 | |
1683 | static inline int is_ht_workaround_enabled(void) |
1684 | { |
1685 | return 0; |
1686 | } |
1687 | #endif /* CONFIG_CPU_SUP_INTEL */ |
1688 | |
1689 | #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN)) |
1690 | int zhaoxin_pmu_init(void); |
1691 | #else |
1692 | static inline int zhaoxin_pmu_init(void) |
1693 | { |
1694 | return 0; |
1695 | } |
1696 | #endif /*CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN*/ |
1697 | |