1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/bitops.h> |
3 | #include <linux/types.h> |
4 | #include <linux/slab.h> |
5 | #include <linux/sched/clock.h> |
6 | |
7 | #include <asm/cpu_entry_area.h> |
8 | #include <asm/debugreg.h> |
9 | #include <asm/perf_event.h> |
10 | #include <asm/tlbflush.h> |
11 | #include <asm/insn.h> |
12 | #include <asm/io.h> |
13 | #include <asm/timer.h> |
14 | |
15 | #include "../perf_event.h" |
16 | |
17 | /* Waste a full page so it can be mapped into the cpu_entry_area */ |
18 | DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); |
19 | |
20 | /* The size of a BTS record in bytes: */ |
21 | #define BTS_RECORD_SIZE 24 |
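/* Three u64s per record: branch-from, branch-to, flags (matches struct bts_record below). */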
22 | |
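/*
 * One page bounds how far intel_pmu_pebs_fixup_ip() will copy and decode
 * when rewinding from the sampled IP back to the eventing instruction.
 */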
23 | #define PEBS_FIXUP_SIZE PAGE_SIZE |
24 | |
25 | /* |
26 | * pebs_record_32 for p4 and core not supported |
27 | |
28 | struct pebs_record_32 { |
29 | u32 flags, ip; |
	u32 ax, bx, cx, dx;
31 | u32 si, di, bp, sp; |
32 | }; |
33 | |
34 | */ |
35 | |
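/*
 * The PEBS data source (DSE) field is a single u64 whose layout depends on
 * the sampling event; the structs below overlay the load, store,
 * store-latency and MTL e-core decodings of the same bits.
 */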
36 | union intel_x86_pebs_dse { |
37 | u64 val; |
38 | struct { |
39 | unsigned int ld_dse:4; |
40 | unsigned int ld_stlb_miss:1; |
41 | unsigned int ld_locked:1; |
42 | unsigned int ld_data_blk:1; |
43 | unsigned int ld_addr_blk:1; |
44 | unsigned int ld_reserved:24; |
45 | }; |
46 | struct { |
47 | unsigned int st_l1d_hit:1; |
48 | unsigned int st_reserved1:3; |
49 | unsigned int st_stlb_miss:1; |
50 | unsigned int st_locked:1; |
51 | unsigned int st_reserved2:26; |
52 | }; |
53 | struct { |
54 | unsigned int st_lat_dse:4; |
55 | unsigned int st_lat_stlb_miss:1; |
56 | unsigned int st_lat_locked:1; |
57 | unsigned int ld_reserved3:26; |
58 | }; |
59 | struct { |
60 | unsigned int mtl_dse:5; |
61 | unsigned int mtl_locked:1; |
62 | unsigned int mtl_stlb_miss:1; |
63 | unsigned int mtl_fwd_blk:1; |
64 | unsigned int ld_reserved4:24; |
65 | }; |
66 | }; |
67 | |
68 | |
69 | /* |
70 | * Map PEBS Load Latency Data Source encodings to generic |
71 | * memory data source information |
72 | */ |
73 | #define P(a, b) PERF_MEM_S(a, b) |
74 | #define OP_LH (P(OP, LOAD) | P(LVL, HIT)) |
75 | #define LEVEL(x) P(LVLNUM, x) |
76 | #define REM P(REMOTE, REMOTE) |
77 | #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS)) |
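/*
 * For reference: P(LVL, L2) expands via PERF_MEM_S() to
 * PERF_MEM_LVL_L2 << PERF_MEM_LVL_SHIFT, so each entry in the table
 * below is a fully encoded perf_mem_data_src value.
 */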
78 | |
79 | /* Version for Sandy Bridge and later */ |
80 | static u64 pebs_data_source[] = { |
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* 0x00: unknown L3 */
82 | OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 local */ |
83 | OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */ |
84 | OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x03: L2 hit */ |
85 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x04: L3 hit */ |
86 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */ |
87 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */ |
88 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */ |
89 | OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */ |
90 | OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/ |
91 | OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */ |
92 | OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */ |
93 | OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* 0x0c: L3 miss, excl */ |
94 | OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */ |
95 | OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */ |
96 | OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */ |
97 | }; |
98 | |
99 | /* Patch up minor differences in the bits */ |
100 | void __init intel_pmu_pebs_data_source_nhm(void) |
101 | { |
102 | pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); |
103 | pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); |
104 | pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); |
105 | } |
106 | |
107 | static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source) |
108 | { |
109 | u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4); |
110 | |
111 | data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT); |
112 | data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT); |
113 | data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE); |
114 | data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD); |
115 | data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM); |
116 | } |
117 | |
118 | void __init intel_pmu_pebs_data_source_skl(bool pmem) |
119 | { |
	__intel_pmu_pebs_data_source_skl(pmem, pebs_data_source);
121 | } |
122 | |
123 | static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source) |
124 | { |
125 | data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); |
126 | data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); |
127 | data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); |
128 | } |
129 | |
130 | void __init intel_pmu_pebs_data_source_grt(void) |
131 | { |
	__intel_pmu_pebs_data_source_grt(pebs_data_source);
133 | } |
134 | |
135 | void __init intel_pmu_pebs_data_source_adl(void) |
136 | { |
137 | u64 *data_source; |
138 | |
139 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; |
140 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); |
	__intel_pmu_pebs_data_source_skl(false, data_source);
142 | |
143 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; |
144 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); |
145 | __intel_pmu_pebs_data_source_grt(data_source); |
146 | } |
147 | |
148 | static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source) |
149 | { |
150 | data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); |
151 | data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); |
152 | data_source[0x0a] = OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE); |
153 | data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE); |
154 | data_source[0x0c] = OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD); |
155 | data_source[0x0d] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM); |
156 | } |
157 | |
158 | void __init intel_pmu_pebs_data_source_mtl(void) |
159 | { |
160 | u64 *data_source; |
161 | |
162 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; |
163 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); |
	__intel_pmu_pebs_data_source_skl(false, data_source);
165 | |
166 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; |
167 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); |
168 | __intel_pmu_pebs_data_source_cmt(data_source); |
169 | } |
170 | |
171 | void __init intel_pmu_pebs_data_source_cmt(void) |
172 | { |
	__intel_pmu_pebs_data_source_cmt(pebs_data_source);
174 | } |
175 | |
176 | static u64 precise_store_data(u64 status) |
177 | { |
178 | union intel_x86_pebs_dse dse; |
179 | u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2); |
180 | |
181 | dse.val = status; |
182 | |
	/*
	 * bit 4: TLB access
	 * 1 = store missed 2nd level TLB
	 *
	 * so it either hit the walker or the OS,
	 * otherwise it hit the 2nd level TLB
	 */
190 | if (dse.st_stlb_miss) |
191 | val |= P(TLB, MISS); |
192 | else |
193 | val |= P(TLB, HIT); |
194 | |
195 | /* |
196 | * bit 0: hit L1 data cache |
197 | * if not set, then all we know is that |
198 | * it missed L1D |
199 | */ |
200 | if (dse.st_l1d_hit) |
201 | val |= P(LVL, HIT); |
202 | else |
203 | val |= P(LVL, MISS); |
204 | |
205 | /* |
206 | * bit 5: Locked prefix |
207 | */ |
208 | if (dse.st_locked) |
209 | val |= P(LOCK, LOCKED); |
210 | |
211 | return val; |
212 | } |
213 | |
214 | static u64 precise_datala_hsw(struct perf_event *event, u64 status) |
215 | { |
216 | union perf_mem_data_src dse; |
217 | |
218 | dse.val = PERF_MEM_NA; |
219 | |
220 | if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) |
221 | dse.mem_op = PERF_MEM_OP_STORE; |
222 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) |
223 | dse.mem_op = PERF_MEM_OP_LOAD; |
224 | |
225 | /* |
226 | * L1 info only valid for following events: |
227 | * |
228 | * MEM_UOPS_RETIRED.STLB_MISS_STORES |
229 | * MEM_UOPS_RETIRED.LOCK_STORES |
230 | * MEM_UOPS_RETIRED.SPLIT_STORES |
231 | * MEM_UOPS_RETIRED.ALL_STORES |
232 | */ |
233 | if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { |
234 | if (status & 1) |
235 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; |
236 | else |
237 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS; |
238 | } |
239 | return dse.val; |
240 | } |
241 | |
242 | static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock) |
243 | { |
244 | /* |
245 | * TLB access |
246 | * 0 = did not miss 2nd level TLB |
247 | * 1 = missed 2nd level TLB |
248 | */ |
249 | if (tlb) |
250 | *val |= P(TLB, MISS) | P(TLB, L2); |
251 | else |
252 | *val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2); |
253 | |
254 | /* locked prefix */ |
255 | if (lock) |
256 | *val |= P(LOCK, LOCKED); |
257 | } |
258 | |
259 | /* Retrieve the latency data for e-core of ADL */ |
260 | static u64 __adl_latency_data_small(struct perf_event *event, u64 status, |
261 | u8 dse, bool tlb, bool lock, bool blk) |
262 | { |
263 | u64 val; |
264 | |
265 | WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); |
266 | |
267 | dse &= PERF_PEBS_DATA_SOURCE_MASK; |
268 | val = hybrid_var(event->pmu, pebs_data_source)[dse]; |
269 | |
	pebs_set_tlb_lock(&val, tlb, lock);
271 | |
272 | if (blk) |
273 | val |= P(BLK, DATA); |
274 | else |
275 | val |= P(BLK, NA); |
276 | |
277 | return val; |
278 | } |
279 | |
280 | u64 adl_latency_data_small(struct perf_event *event, u64 status) |
281 | { |
282 | union intel_x86_pebs_dse dse; |
283 | |
284 | dse.val = status; |
285 | |
	return __adl_latency_data_small(event, status, dse.ld_dse,
					dse.ld_locked, dse.ld_stlb_miss,
					dse.ld_data_blk);
289 | } |
290 | |
291 | /* Retrieve the latency data for e-core of MTL */ |
292 | u64 mtl_latency_data_small(struct perf_event *event, u64 status) |
293 | { |
294 | union intel_x86_pebs_dse dse; |
295 | |
296 | dse.val = status; |
297 | |
	return __adl_latency_data_small(event, status, dse.mtl_dse,
					dse.mtl_stlb_miss, dse.mtl_locked,
					dse.mtl_fwd_blk);
301 | } |
302 | |
303 | static u64 load_latency_data(struct perf_event *event, u64 status) |
304 | { |
305 | union intel_x86_pebs_dse dse; |
306 | u64 val; |
307 | |
308 | dse.val = status; |
309 | |
310 | /* |
311 | * use the mapping table for bit 0-3 |
312 | */ |
313 | val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse]; |
314 | |
315 | /* |
	 * Nehalem models do not support TLB or Lock info
317 | */ |
318 | if (x86_pmu.pebs_no_tlb) { |
319 | val |= P(TLB, NA) | P(LOCK, NA); |
320 | return val; |
321 | } |
322 | |
	pebs_set_tlb_lock(&val, dse.ld_stlb_miss, dse.ld_locked);
324 | |
325 | /* |
	 * Ice Lake and earlier models do not support block info.
327 | */ |
328 | if (!x86_pmu.pebs_block) { |
329 | val |= P(BLK, NA); |
330 | return val; |
331 | } |
332 | /* |
333 | * bit 6: load was blocked since its data could not be forwarded |
334 | * from a preceding store |
335 | */ |
336 | if (dse.ld_data_blk) |
337 | val |= P(BLK, DATA); |
338 | |
339 | /* |
340 | * bit 7: load was blocked due to potential address conflict with |
341 | * a preceding store |
342 | */ |
343 | if (dse.ld_addr_blk) |
344 | val |= P(BLK, ADDR); |
345 | |
346 | if (!dse.ld_data_blk && !dse.ld_addr_blk) |
347 | val |= P(BLK, NA); |
348 | |
349 | return val; |
350 | } |
351 | |
352 | static u64 store_latency_data(struct perf_event *event, u64 status) |
353 | { |
354 | union intel_x86_pebs_dse dse; |
355 | union perf_mem_data_src src; |
356 | u64 val; |
357 | |
358 | dse.val = status; |
359 | |
360 | /* |
361 | * use the mapping table for bit 0-3 |
362 | */ |
363 | val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse]; |
364 | |
	pebs_set_tlb_lock(&val, dse.st_lat_stlb_miss, dse.st_lat_locked);
366 | |
367 | val |= P(BLK, NA); |
368 | |
369 | /* |
370 | * the pebs_data_source table is only for loads |
371 | * so override the mem_op to say STORE instead |
372 | */ |
373 | src.val = val; |
	src.mem_op = P(OP, STORE);
375 | |
376 | return src.val; |
377 | } |
378 | |
379 | struct pebs_record_core { |
380 | u64 flags, ip; |
381 | u64 ax, bx, cx, dx; |
382 | u64 si, di, bp, sp; |
383 | u64 r8, r9, r10, r11; |
384 | u64 r12, r13, r14, r15; |
385 | }; |
386 | |
387 | struct pebs_record_nhm { |
388 | u64 flags, ip; |
389 | u64 ax, bx, cx, dx; |
390 | u64 si, di, bp, sp; |
391 | u64 r8, r9, r10, r11; |
392 | u64 r12, r13, r14, r15; |
393 | u64 status, dla, dse, lat; |
394 | }; |
395 | |
396 | /* |
397 | * Same as pebs_record_nhm, with two additional fields. |
398 | */ |
399 | struct pebs_record_hsw { |
400 | u64 flags, ip; |
401 | u64 ax, bx, cx, dx; |
402 | u64 si, di, bp, sp; |
403 | u64 r8, r9, r10, r11; |
404 | u64 r12, r13, r14, r15; |
405 | u64 status, dla, dse, lat; |
406 | u64 real_ip, tsx_tuning; |
407 | }; |
408 | |
409 | union hsw_tsx_tuning { |
410 | struct { |
411 | u32 cycles_last_block : 32, |
412 | hle_abort : 1, |
413 | rtm_abort : 1, |
414 | instruction_abort : 1, |
415 | non_instruction_abort : 1, |
416 | retry : 1, |
417 | data_conflict : 1, |
418 | capacity_writes : 1, |
419 | capacity_reads : 1; |
420 | }; |
421 | u64 value; |
422 | }; |
423 | |
424 | #define PEBS_HSW_TSX_FLAGS 0xff00000000ULL |
425 | |
426 | /* Same as HSW, plus TSC */ |
427 | |
428 | struct pebs_record_skl { |
429 | u64 flags, ip; |
430 | u64 ax, bx, cx, dx; |
431 | u64 si, di, bp, sp; |
432 | u64 r8, r9, r10, r11; |
433 | u64 r12, r13, r14, r15; |
434 | u64 status, dla, dse, lat; |
435 | u64 real_ip, tsx_tuning; |
436 | u64 tsc; |
437 | }; |
438 | |
439 | void init_debug_store_on_cpu(int cpu) |
440 | { |
441 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; |
442 | |
443 | if (!ds) |
444 | return; |
445 | |
	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
449 | } |
450 | |
451 | void fini_debug_store_on_cpu(int cpu) |
452 | { |
453 | if (!per_cpu(cpu_hw_events, cpu).ds) |
454 | return; |
455 | |
	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
457 | } |
458 | |
459 | static DEFINE_PER_CPU(void *, insn_buffer); |
460 | |
461 | static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) |
462 | { |
463 | unsigned long start = (unsigned long)cea; |
464 | phys_addr_t pa; |
465 | size_t msz = 0; |
466 | |
	pa = virt_to_phys(addr);
468 | |
469 | preempt_disable(); |
470 | for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) |
		cea_set_pte(cea, pa, prot);
472 | |
473 | /* |
474 | * This is a cross-CPU update of the cpu_entry_area, we must shoot down |
475 | * all TLB entries for it. |
476 | */ |
	flush_tlb_kernel_range(start, start + size);
478 | preempt_enable(); |
479 | } |
480 | |
481 | static void ds_clear_cea(void *cea, size_t size) |
482 | { |
483 | unsigned long start = (unsigned long)cea; |
484 | size_t msz = 0; |
485 | |
486 | preempt_disable(); |
487 | for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) |
		cea_set_pte(cea, 0, PAGE_NONE);
489 | |
	flush_tlb_kernel_range(start, start + size);
491 | preempt_enable(); |
492 | } |
493 | |
494 | static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) |
495 | { |
496 | unsigned int order = get_order(size); |
497 | int node = cpu_to_node(cpu); |
498 | struct page *page; |
499 | |
	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
501 | return page ? page_address(page) : NULL; |
502 | } |
503 | |
504 | static void dsfree_pages(const void *buffer, size_t size) |
505 | { |
506 | if (buffer) |
		free_pages((unsigned long)buffer, get_order(size));
508 | } |
509 | |
510 | static int alloc_pebs_buffer(int cpu) |
511 | { |
512 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); |
513 | struct debug_store *ds = hwev->ds; |
514 | size_t bsiz = x86_pmu.pebs_buffer_size; |
515 | int max, node = cpu_to_node(cpu); |
516 | void *buffer, *insn_buff, *cea; |
517 | |
518 | if (!x86_pmu.pebs) |
519 | return 0; |
520 | |
	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
522 | if (unlikely(!buffer)) |
523 | return -ENOMEM; |
524 | |
525 | /* |
526 | * HSW+ already provides us the eventing ip; no need to allocate this |
527 | * buffer then. |
528 | */ |
529 | if (x86_pmu.intel_cap.pebs_format < 2) { |
530 | insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); |
531 | if (!insn_buff) { |
			dsfree_pages(buffer, bsiz);
533 | return -ENOMEM; |
534 | } |
535 | per_cpu(insn_buffer, cpu) = insn_buff; |
536 | } |
537 | hwev->ds_pebs_vaddr = buffer; |
538 | /* Update the cpu entry area mapping */ |
539 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; |
540 | ds->pebs_buffer_base = (unsigned long) cea; |
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
542 | ds->pebs_index = ds->pebs_buffer_base; |
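	/* Round the buffer down to a whole number of PEBS records. */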
543 | max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); |
544 | ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; |
545 | return 0; |
546 | } |
547 | |
548 | static void release_pebs_buffer(int cpu) |
549 | { |
550 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); |
551 | void *cea; |
552 | |
553 | if (!x86_pmu.pebs) |
554 | return; |
555 | |
556 | kfree(per_cpu(insn_buffer, cpu)); |
557 | per_cpu(insn_buffer, cpu) = NULL; |
558 | |
559 | /* Clear the fixmap */ |
560 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; |
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
563 | hwev->ds_pebs_vaddr = NULL; |
564 | } |
565 | |
566 | static int alloc_bts_buffer(int cpu) |
567 | { |
568 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); |
569 | struct debug_store *ds = hwev->ds; |
570 | void *buffer, *cea; |
571 | int max; |
572 | |
573 | if (!x86_pmu.bts) |
574 | return 0; |
575 | |
576 | buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu); |
577 | if (unlikely(!buffer)) { |
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
579 | return -ENOMEM; |
580 | } |
581 | hwev->ds_bts_vaddr = buffer; |
582 | /* Update the fixmap */ |
583 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; |
584 | ds->bts_buffer_base = (unsigned long) cea; |
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
586 | ds->bts_index = ds->bts_buffer_base; |
587 | max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; |
588 | ds->bts_absolute_maximum = ds->bts_buffer_base + |
589 | max * BTS_RECORD_SIZE; |
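	/*
	 * Raise the PMI one sixteenth of the buffer before it is completely
	 * full, leaving room to drain records without losing any.
	 */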
590 | ds->bts_interrupt_threshold = ds->bts_absolute_maximum - |
591 | (max / 16) * BTS_RECORD_SIZE; |
592 | return 0; |
593 | } |
594 | |
595 | static void release_bts_buffer(int cpu) |
596 | { |
597 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); |
598 | void *cea; |
599 | |
600 | if (!x86_pmu.bts) |
601 | return; |
602 | |
603 | /* Clear the fixmap */ |
604 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; |
605 | ds_clear_cea(cea, BTS_BUFFER_SIZE); |
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
607 | hwev->ds_bts_vaddr = NULL; |
608 | } |
609 | |
610 | static int alloc_ds_buffer(int cpu) |
611 | { |
612 | struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; |
613 | |
614 | memset(ds, 0, sizeof(*ds)); |
615 | per_cpu(cpu_hw_events, cpu).ds = ds; |
616 | return 0; |
617 | } |
618 | |
619 | static void release_ds_buffer(int cpu) |
620 | { |
621 | per_cpu(cpu_hw_events, cpu).ds = NULL; |
622 | } |
623 | |
624 | void release_ds_buffers(void) |
625 | { |
626 | int cpu; |
627 | |
628 | if (!x86_pmu.bts && !x86_pmu.pebs) |
629 | return; |
630 | |
631 | for_each_possible_cpu(cpu) |
632 | release_ds_buffer(cpu); |
633 | |
634 | for_each_possible_cpu(cpu) { |
635 | /* |
		 * Again, ignore errors from offline CPUs; they will no longer
		 * observe cpu_hw_events.ds and will not program the DS_AREA
		 * when they come up.
639 | */ |
640 | fini_debug_store_on_cpu(cpu); |
641 | } |
642 | |
643 | for_each_possible_cpu(cpu) { |
644 | release_pebs_buffer(cpu); |
645 | release_bts_buffer(cpu); |
646 | } |
647 | } |
648 | |
649 | void reserve_ds_buffers(void) |
650 | { |
651 | int bts_err = 0, pebs_err = 0; |
652 | int cpu; |
653 | |
654 | x86_pmu.bts_active = 0; |
655 | x86_pmu.pebs_active = 0; |
656 | |
657 | if (!x86_pmu.bts && !x86_pmu.pebs) |
658 | return; |
659 | |
660 | if (!x86_pmu.bts) |
661 | bts_err = 1; |
662 | |
663 | if (!x86_pmu.pebs) |
664 | pebs_err = 1; |
665 | |
666 | for_each_possible_cpu(cpu) { |
667 | if (alloc_ds_buffer(cpu)) { |
668 | bts_err = 1; |
669 | pebs_err = 1; |
670 | } |
671 | |
672 | if (!bts_err && alloc_bts_buffer(cpu)) |
673 | bts_err = 1; |
674 | |
675 | if (!pebs_err && alloc_pebs_buffer(cpu)) |
676 | pebs_err = 1; |
677 | |
678 | if (bts_err && pebs_err) |
679 | break; |
680 | } |
681 | |
682 | if (bts_err) { |
683 | for_each_possible_cpu(cpu) |
684 | release_bts_buffer(cpu); |
685 | } |
686 | |
687 | if (pebs_err) { |
688 | for_each_possible_cpu(cpu) |
689 | release_pebs_buffer(cpu); |
690 | } |
691 | |
692 | if (bts_err && pebs_err) { |
693 | for_each_possible_cpu(cpu) |
694 | release_ds_buffer(cpu); |
695 | } else { |
696 | if (x86_pmu.bts && !bts_err) |
697 | x86_pmu.bts_active = 1; |
698 | |
699 | if (x86_pmu.pebs && !pebs_err) |
700 | x86_pmu.pebs_active = 1; |
701 | |
702 | for_each_possible_cpu(cpu) { |
703 | /* |
			 * Ignore wrmsr_on_cpu() errors for offline CPUs; they
			 * will get this call through intel_pmu_cpu_starting().
706 | */ |
707 | init_debug_store_on_cpu(cpu); |
708 | } |
709 | } |
710 | } |
711 | |
712 | /* |
713 | * BTS |
714 | */ |
715 | |
716 | struct event_constraint bts_constraint = |
717 | EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0); |
718 | |
719 | void intel_pmu_enable_bts(u64 config) |
720 | { |
721 | unsigned long debugctlmsr; |
722 | |
723 | debugctlmsr = get_debugctlmsr(); |
724 | |
725 | debugctlmsr |= DEBUGCTLMSR_TR; |
726 | debugctlmsr |= DEBUGCTLMSR_BTS; |
727 | if (config & ARCH_PERFMON_EVENTSEL_INT) |
728 | debugctlmsr |= DEBUGCTLMSR_BTINT; |
729 | |
730 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) |
731 | debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS; |
732 | |
733 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) |
734 | debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR; |
735 | |
736 | update_debugctlmsr(debugctlmsr); |
737 | } |
738 | |
739 | void intel_pmu_disable_bts(void) |
740 | { |
741 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
742 | unsigned long debugctlmsr; |
743 | |
744 | if (!cpuc->ds) |
745 | return; |
746 | |
747 | debugctlmsr = get_debugctlmsr(); |
748 | |
749 | debugctlmsr &= |
750 | ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT | |
751 | DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR); |
752 | |
753 | update_debugctlmsr(debugctlmsr); |
754 | } |
755 | |
756 | int intel_pmu_drain_bts_buffer(void) |
757 | { |
758 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
759 | struct debug_store *ds = cpuc->ds; |
760 | struct bts_record { |
761 | u64 from; |
762 | u64 to; |
763 | u64 flags; |
764 | }; |
765 | struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; |
766 | struct bts_record *at, *base, *top; |
767 | struct perf_output_handle handle; |
	struct perf_event_header header;
769 | struct perf_sample_data data; |
770 | unsigned long skip = 0; |
771 | struct pt_regs regs; |
772 | |
773 | if (!event) |
774 | return 0; |
775 | |
776 | if (!x86_pmu.bts_active) |
777 | return 0; |
778 | |
779 | base = (struct bts_record *)(unsigned long)ds->bts_buffer_base; |
780 | top = (struct bts_record *)(unsigned long)ds->bts_index; |
781 | |
782 | if (top <= base) |
783 | return 0; |
784 | |
785 | memset(®s, 0, sizeof(regs)); |
786 | |
787 | ds->bts_index = ds->bts_buffer_base; |
788 | |
	perf_sample_data_init(&data, 0, event->hw.last_period);
790 | |
791 | /* |
792 | * BTS leaks kernel addresses in branches across the cpl boundary, |
793 | * such as traps or system calls, so unless the user is asking for |
794 | * kernel tracing (and right now it's not possible), we'd need to |
795 | * filter them out. But first we need to count how many of those we |
796 | * have in the current batch. This is an extra O(n) pass, however, |
797 | * it's much faster than the other one especially considering that |
798 | * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the |
799 | * alloc_bts_buffer()). |
800 | */ |
801 | for (at = base; at < top; at++) { |
802 | /* |
803 | * Note that right now *this* BTS code only works if |
804 | * attr::exclude_kernel is set, but let's keep this extra |
805 | * check here in case that changes. |
806 | */ |
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
809 | skip++; |
810 | } |
811 | |
812 | /* |
813 | * Prepare a generic sample, i.e. fill in the invariant fields. |
814 | * We will overwrite the from and to address before we output |
815 | * the sample. |
816 | */ |
817 | rcu_read_lock(); |
	perf_prepare_sample(&data, event, &regs);
	perf_prepare_header(&header, &data, event, &regs);
820 | |
	if (perf_output_begin(&handle, &data, event,
			      header.size * (top - base - skip)))
823 | goto unlock; |
824 | |
825 | for (at = base; at < top; at++) { |
826 | /* Filter out any records that contain kernel addresses. */ |
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
829 | continue; |
830 | |
831 | data.ip = at->from; |
832 | data.addr = at->to; |
833 | |
		perf_output_sample(&handle, &header, &data, event);
835 | } |
836 | |
	perf_output_end(&handle);
838 | |
839 | /* There's new data available. */ |
840 | event->hw.interrupts++; |
841 | event->pending_kill = POLL_IN; |
842 | unlock: |
843 | rcu_read_unlock(); |
844 | return 1; |
845 | } |
846 | |
847 | static inline void intel_pmu_drain_pebs_buffer(void) |
848 | { |
849 | struct perf_sample_data data; |
850 | |
851 | x86_pmu.drain_pebs(NULL, &data); |
852 | } |
853 | |
854 | /* |
855 | * PEBS |
856 | */ |
857 | struct event_constraint intel_core2_pebs_event_constraints[] = { |
858 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
859 | INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ |
860 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ |
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
862 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ |
863 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
864 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), |
865 | EVENT_CONSTRAINT_END |
866 | }; |
867 | |
868 | struct event_constraint intel_atom_pebs_event_constraints[] = { |
869 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
870 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ |
871 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ |
872 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
873 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), |
874 | /* Allow all events as PEBS with no flags */ |
875 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), |
876 | EVENT_CONSTRAINT_END |
877 | }; |
878 | |
879 | struct event_constraint intel_slm_pebs_event_constraints[] = { |
880 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
881 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1), |
882 | /* Allow all events as PEBS with no flags */ |
883 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), |
884 | EVENT_CONSTRAINT_END |
885 | }; |
886 | |
887 | struct event_constraint intel_glm_pebs_event_constraints[] = { |
888 | /* Allow all events as PEBS with no flags */ |
889 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), |
890 | EVENT_CONSTRAINT_END |
891 | }; |
892 | |
893 | struct event_constraint intel_grt_pebs_event_constraints[] = { |
894 | /* Allow all events as PEBS with no flags */ |
895 | INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3), |
896 | INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf), |
897 | EVENT_CONSTRAINT_END |
898 | }; |
899 | |
900 | struct event_constraint intel_nehalem_pebs_event_constraints[] = { |
901 | INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ |
902 | INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
903 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ |
904 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ |
905 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ |
906 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
907 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ |
908 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ |
909 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ |
910 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ |
911 | INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ |
912 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
913 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), |
914 | EVENT_CONSTRAINT_END |
915 | }; |
916 | |
917 | struct event_constraint intel_westmere_pebs_event_constraints[] = { |
918 | INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ |
919 | INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
920 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ |
921 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ |
922 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ |
923 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
924 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ |
925 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ |
926 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ |
927 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ |
928 | INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ |
929 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
930 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), |
931 | EVENT_CONSTRAINT_END |
932 | }; |
933 | |
934 | struct event_constraint intel_snb_pebs_event_constraints[] = { |
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
936 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ |
937 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ |
938 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
939 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
940 | INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ |
941 | INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ |
942 | INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ |
943 | INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ |
944 | /* Allow all events as PEBS with no flags */ |
945 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
946 | EVENT_CONSTRAINT_END |
947 | }; |
948 | |
949 | struct event_constraint intel_ivb_pebs_event_constraints[] = { |
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
951 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ |
952 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ |
953 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
954 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
955 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
956 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
957 | INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ |
958 | INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ |
959 | INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ |
960 | INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ |
961 | /* Allow all events as PEBS with no flags */ |
962 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
963 | EVENT_CONSTRAINT_END |
964 | }; |
965 | |
966 | struct event_constraint intel_hsw_pebs_event_constraints[] = { |
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
968 | INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ |
969 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
970 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
971 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
972 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
973 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ |
974 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ |
975 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ |
976 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ |
977 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ |
978 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ |
979 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ |
980 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ |
981 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ |
982 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ |
983 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ |
984 | /* Allow all events as PEBS with no flags */ |
985 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
986 | EVENT_CONSTRAINT_END |
987 | }; |
988 | |
989 | struct event_constraint intel_bdw_pebs_event_constraints[] = { |
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
991 | INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ |
992 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
993 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
994 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
995 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
996 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ |
997 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ |
998 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ |
999 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ |
1000 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ |
1001 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ |
1002 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ |
1003 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ |
1004 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ |
1005 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ |
1006 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ |
1007 | /* Allow all events as PEBS with no flags */ |
1008 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
1009 | EVENT_CONSTRAINT_END |
1010 | }; |
1011 | |
1012 | |
1013 | struct event_constraint intel_skl_pebs_event_constraints[] = { |
1014 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
1015 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
1016 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
1017 | /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */ |
1018 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), |
1019 | INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ |
1020 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ |
1021 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ |
1022 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ |
1023 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */ |
1024 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ |
1025 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ |
1026 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ |
1027 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ |
1028 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ |
1029 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ |
1030 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */ |
1031 | /* Allow all events as PEBS with no flags */ |
1032 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), |
1033 | EVENT_CONSTRAINT_END |
1034 | }; |
1035 | |
1036 | struct event_constraint intel_icl_pebs_event_constraints[] = { |
1037 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x100000000ULL), /* old INST_RETIRED.PREC_DIST */ |
1038 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x0100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ |
1039 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ |
1040 | |
1041 | INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
1042 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ |
1043 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ |
1044 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ |
1045 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ |
1046 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ |
1047 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ |
1048 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ |
1049 | |
1050 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ |
1051 | |
1052 | INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ |
1053 | |
1054 | /* |
1055 | * Everything else is handled by PMU_FL_PEBS_ALL, because we |
1056 | * need the full constraints from the main table. |
1057 | */ |
1058 | |
1059 | EVENT_CONSTRAINT_END |
1060 | }; |
1061 | |
1062 | struct event_constraint intel_glc_pebs_event_constraints[] = { |
1063 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ |
1064 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), |
1065 | |
1066 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe), |
1067 | INTEL_PLD_CONSTRAINT(0x1cd, 0xfe), |
1068 | INTEL_PSD_CONSTRAINT(0x2cd, 0x1), |
1069 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ |
1070 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ |
1071 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ |
1072 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ |
1073 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ |
1074 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ |
1075 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ |
1076 | |
1077 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), |
1078 | |
1079 | INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), |
1080 | |
1081 | /* |
1082 | * Everything else is handled by PMU_FL_PEBS_ALL, because we |
1083 | * need the full constraints from the main table. |
1084 | */ |
1085 | |
1086 | EVENT_CONSTRAINT_END |
1087 | }; |
1088 | |
1089 | struct event_constraint *intel_pebs_constraints(struct perf_event *event) |
1090 | { |
1091 | struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints); |
1092 | struct event_constraint *c; |
1093 | |
1094 | if (!event->attr.precise_ip) |
1095 | return NULL; |
1096 | |
1097 | if (pebs_constraints) { |
1098 | for_each_event_constraint(c, pebs_constraints) { |
			if (constraint_match(c, event->hw.config)) {
1100 | event->hw.flags |= c->flags; |
1101 | return c; |
1102 | } |
1103 | } |
1104 | } |
1105 | |
1106 | /* |
1107 | * Extended PEBS support |
1108 | * Makes the PEBS code search the normal constraints. |
1109 | */ |
1110 | if (x86_pmu.flags & PMU_FL_PEBS_ALL) |
1111 | return NULL; |
1112 | |
1113 | return &emptyconstraint; |
1114 | } |
1115 | |
1116 | /* |
1117 | * We need the sched_task callback even for per-cpu events when we use |
1118 | * the large interrupt threshold, such that we can provide PID and TID |
1119 | * to PEBS samples. |
1120 | */ |
1121 | static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc) |
1122 | { |
1123 | if (cpuc->n_pebs == cpuc->n_pebs_via_pt) |
1124 | return false; |
1125 | |
1126 | return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); |
1127 | } |
1128 | |
1129 | void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) |
1130 | { |
1131 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1132 | |
1133 | if (!sched_in && pebs_needs_sched_cb(cpuc)) |
1134 | intel_pmu_drain_pebs_buffer(); |
1135 | } |
1136 | |
1137 | static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) |
1138 | { |
1139 | struct debug_store *ds = cpuc->ds; |
1140 | int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events); |
1141 | int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); |
1142 | u64 threshold; |
1143 | int reserved; |
1144 | |
1145 | if (cpuc->n_pebs_via_pt) |
1146 | return; |
1147 | |
1148 | if (x86_pmu.flags & PMU_FL_PEBS_ALL) |
1149 | reserved = max_pebs_events + num_counters_fixed; |
1150 | else |
1151 | reserved = max_pebs_events; |
1152 | |
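	/*
	 * With only large-PEBS events, arm the threshold near the end of
	 * the buffer; otherwise take a PMI after every single record.
	 */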
1153 | if (cpuc->n_pebs == cpuc->n_large_pebs) { |
1154 | threshold = ds->pebs_absolute_maximum - |
1155 | reserved * cpuc->pebs_record_size; |
1156 | } else { |
1157 | threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; |
1158 | } |
1159 | |
1160 | ds->pebs_interrupt_threshold = threshold; |
1161 | } |
1162 | |
1163 | static void adaptive_pebs_record_size_update(void) |
1164 | { |
1165 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1166 | u64 pebs_data_cfg = cpuc->pebs_data_cfg; |
1167 | int sz = sizeof(struct pebs_basic); |
1168 | |
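	/*
	 * Optional groups follow struct pebs_basic in a fixed order:
	 * meminfo, GPRs, XMMs, then the LBR entries.
	 */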
1169 | if (pebs_data_cfg & PEBS_DATACFG_MEMINFO) |
1170 | sz += sizeof(struct pebs_meminfo); |
1171 | if (pebs_data_cfg & PEBS_DATACFG_GP) |
1172 | sz += sizeof(struct pebs_gprs); |
1173 | if (pebs_data_cfg & PEBS_DATACFG_XMMS) |
1174 | sz += sizeof(struct pebs_xmm); |
1175 | if (pebs_data_cfg & PEBS_DATACFG_LBRS) |
1176 | sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry); |
1177 | |
1178 | cpuc->pebs_record_size = sz; |
1179 | } |
1180 | |
1181 | #define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \ |
1182 | PERF_SAMPLE_PHYS_ADDR | \ |
1183 | PERF_SAMPLE_WEIGHT_TYPE | \ |
1184 | PERF_SAMPLE_TRANSACTION | \ |
1185 | PERF_SAMPLE_DATA_PAGE_SIZE) |
1186 | |
1187 | static u64 pebs_update_adaptive_cfg(struct perf_event *event) |
1188 | { |
1189 | struct perf_event_attr *attr = &event->attr; |
1190 | u64 sample_type = attr->sample_type; |
1191 | u64 pebs_data_cfg = 0; |
1192 | bool gprs, tsx_weight; |
1193 | |
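	/*
	 * The basic group alone is enough when nothing beyond IP and TIME
	 * is sampled and the precise IP comes straight from the record.
	 */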
1194 | if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) && |
1195 | attr->precise_ip > 1) |
1196 | return pebs_data_cfg; |
1197 | |
1198 | if (sample_type & PERF_PEBS_MEMINFO_TYPE) |
1199 | pebs_data_cfg |= PEBS_DATACFG_MEMINFO; |
1200 | |
1201 | /* |
1202 | * We need GPRs when: |
	 * + the user requested them
	 * + precise_ip < 2, to recover the non-event IP
	 * + RTM TSX weight needs GPRs for the abort code (held in AX)
1206 | */ |
1207 | gprs = (sample_type & PERF_SAMPLE_REGS_INTR) && |
1208 | (attr->sample_regs_intr & PEBS_GP_REGS); |
1209 | |
1210 | tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) && |
1211 | ((attr->config & INTEL_ARCH_EVENT_MASK) == |
1212 | x86_pmu.rtm_abort_event); |
1213 | |
1214 | if (gprs || (attr->precise_ip < 2) || tsx_weight) |
1215 | pebs_data_cfg |= PEBS_DATACFG_GP; |
1216 | |
1217 | if ((sample_type & PERF_SAMPLE_REGS_INTR) && |
1218 | (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) |
1219 | pebs_data_cfg |= PEBS_DATACFG_XMMS; |
1220 | |
1221 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) { |
1222 | /* |
1223 | * For now always log all LBRs. Could configure this |
1224 | * later. |
1225 | */ |
1226 | pebs_data_cfg |= PEBS_DATACFG_LBRS | |
1227 | ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); |
1228 | } |
1229 | |
1230 | return pebs_data_cfg; |
1231 | } |
1232 | |
1233 | static void |
1234 | pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, |
1235 | struct perf_event *event, bool add) |
1236 | { |
1237 | struct pmu *pmu = event->pmu; |
1238 | |
1239 | /* |
1240 | * Make sure we get updated with the first PEBS event. |
1241 | * During removal, ->pebs_data_cfg is still valid for |
1242 | * the last PEBS event. Don't clear it. |
1243 | */ |
1244 | if ((cpuc->n_pebs == 1) && add) |
1245 | cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW; |
1246 | |
1247 | if (needed_cb != pebs_needs_sched_cb(cpuc)) { |
1248 | if (!needed_cb) |
1249 | perf_sched_cb_inc(pmu); |
1250 | else |
1251 | perf_sched_cb_dec(pmu); |
1252 | |
1253 | cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW; |
1254 | } |
1255 | |
1256 | /* |
1257 | * The PEBS record doesn't shrink on pmu::del(). Doing so would require |
1258 | * iterating all remaining PEBS events to reconstruct the config. |
1259 | */ |
1260 | if (x86_pmu.intel_cap.pebs_baseline && add) { |
1261 | u64 pebs_data_cfg; |
1262 | |
1263 | pebs_data_cfg = pebs_update_adaptive_cfg(event); |
1264 | /* |
1265 | * Be sure to update the thresholds when we change the record. |
1266 | */ |
1267 | if (pebs_data_cfg & ~cpuc->pebs_data_cfg) |
1268 | cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; |
1269 | } |
1270 | } |
1271 | |
1272 | void intel_pmu_pebs_add(struct perf_event *event) |
1273 | { |
1274 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1275 | struct hw_perf_event *hwc = &event->hw; |
1276 | bool needed_cb = pebs_needs_sched_cb(cpuc); |
1277 | |
1278 | cpuc->n_pebs++; |
1279 | if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) |
1280 | cpuc->n_large_pebs++; |
1281 | if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) |
1282 | cpuc->n_pebs_via_pt++; |
1283 | |
	pebs_update_state(needed_cb, cpuc, event, true);
1285 | } |
1286 | |
1287 | static void intel_pmu_pebs_via_pt_disable(struct perf_event *event) |
1288 | { |
1289 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1290 | |
1291 | if (!is_pebs_pt(event)) |
1292 | return; |
1293 | |
1294 | if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK)) |
1295 | cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK; |
1296 | } |
1297 | |
1298 | static void intel_pmu_pebs_via_pt_enable(struct perf_event *event) |
1299 | { |
1300 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1301 | struct hw_perf_event *hwc = &event->hw; |
1302 | struct debug_store *ds = cpuc->ds; |
1303 | u64 value = ds->pebs_event_reset[hwc->idx]; |
1304 | u32 base = MSR_RELOAD_PMC0; |
1305 | unsigned int idx = hwc->idx; |
1306 | |
1307 | if (!is_pebs_pt(event)) |
1308 | return; |
1309 | |
1310 | if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) |
1311 | cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD; |
1312 | |
1313 | cpuc->pebs_enabled |= PEBS_OUTPUT_PT; |
1314 | |
1315 | if (hwc->idx >= INTEL_PMC_IDX_FIXED) { |
1316 | base = MSR_RELOAD_FIXED_CTR0; |
1317 | idx = hwc->idx - INTEL_PMC_IDX_FIXED; |
1318 | if (x86_pmu.intel_cap.pebs_format < 5) |
1319 | value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx]; |
1320 | else |
1321 | value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx]; |
1322 | } |
	wrmsrl(base + idx, value);
1324 | } |
1325 | |
1326 | static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) |
1327 | { |
1328 | if (cpuc->n_pebs == cpuc->n_large_pebs && |
1329 | cpuc->n_pebs != cpuc->n_pebs_via_pt) |
1330 | intel_pmu_drain_pebs_buffer(); |
1331 | } |
1332 | |
1333 | void intel_pmu_pebs_enable(struct perf_event *event) |
1334 | { |
1335 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1336 | u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW; |
1337 | struct hw_perf_event *hwc = &event->hw; |
1338 | struct debug_store *ds = cpuc->ds; |
1339 | unsigned int idx = hwc->idx; |
1340 | |
1341 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; |
1342 | |
1343 | cpuc->pebs_enabled |= 1ULL << hwc->idx; |
1344 | |
1345 | if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) |
1346 | cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); |
1347 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) |
1348 | cpuc->pebs_enabled |= 1ULL << 63; |
1349 | |
1350 | if (x86_pmu.intel_cap.pebs_baseline) { |
1351 | hwc->config |= ICL_EVENTSEL_ADAPTIVE; |
1352 | if (pebs_data_cfg != cpuc->active_pebs_data_cfg) { |
1353 | /* |
1354 | * drain_pebs() assumes uniform record size; |
1355 | * hence we need to drain when changing said |
1356 | * size. |
1357 | */ |
1358 | intel_pmu_drain_large_pebs(cpuc); |
1359 | adaptive_pebs_record_size_update(); |
			wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
1361 | cpuc->active_pebs_data_cfg = pebs_data_cfg; |
1362 | } |
1363 | } |
1364 | if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) { |
1365 | cpuc->pebs_data_cfg = pebs_data_cfg; |
1366 | pebs_update_threshold(cpuc); |
1367 | } |
1368 | |
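	/*
	 * Reset values for the fixed counters live after the GP-counter
	 * slots in ds->pebs_event_reset; remap idx accordingly.
	 */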
1369 | if (idx >= INTEL_PMC_IDX_FIXED) { |
1370 | if (x86_pmu.intel_cap.pebs_format < 5) |
1371 | idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED); |
1372 | else |
1373 | idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED); |
1374 | } |
1375 | |
1376 | /* |
	 * Use auto-reload if possible to save an MSR write in the PMI.
	 * This must be done in pmu::start(), because of PERF_EVENT_IOC_PERIOD.
1379 | */ |
1380 | if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { |
1381 | ds->pebs_event_reset[idx] = |
1382 | (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; |
1383 | } else { |
1384 | ds->pebs_event_reset[idx] = 0; |
1385 | } |
1386 | |
1387 | intel_pmu_pebs_via_pt_enable(event); |
1388 | } |
1389 | |
1390 | void intel_pmu_pebs_del(struct perf_event *event) |
1391 | { |
1392 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1393 | struct hw_perf_event *hwc = &event->hw; |
1394 | bool needed_cb = pebs_needs_sched_cb(cpuc); |
1395 | |
1396 | cpuc->n_pebs--; |
1397 | if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) |
1398 | cpuc->n_large_pebs--; |
1399 | if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) |
1400 | cpuc->n_pebs_via_pt--; |
1401 | |
	pebs_update_state(needed_cb, cpuc, event, false);
1403 | } |
1404 | |
1405 | void intel_pmu_pebs_disable(struct perf_event *event) |
1406 | { |
1407 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1408 | struct hw_perf_event *hwc = &event->hw; |
1409 | |
1410 | intel_pmu_drain_large_pebs(cpuc); |
1411 | |
1412 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); |
1413 | |
1414 | if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && |
1415 | (x86_pmu.version < 5)) |
1416 | cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); |
1417 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) |
1418 | cpuc->pebs_enabled &= ~(1ULL << 63); |
1419 | |
1420 | intel_pmu_pebs_via_pt_disable(event); |
1421 | |
1422 | if (cpuc->enabled) |
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1424 | |
1425 | hwc->config |= ARCH_PERFMON_EVENTSEL_INT; |
1426 | } |
1427 | |
1428 | void intel_pmu_pebs_enable_all(void) |
1429 | { |
1430 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1431 | |
1432 | if (cpuc->pebs_enabled) |
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1434 | } |
1435 | |
1436 | void intel_pmu_pebs_disable_all(void) |
1437 | { |
1438 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1439 | |
1440 | if (cpuc->pebs_enabled) |
1441 | __intel_pmu_pebs_disable_all(); |
1442 | } |
1443 | |
1444 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) |
1445 | { |
1446 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1447 | unsigned long from = cpuc->lbr_entries[0].from; |
1448 | unsigned long old_to, to = cpuc->lbr_entries[0].to; |
1449 | unsigned long ip = regs->ip; |
1450 | int is_64bit = 0; |
1451 | void *kaddr; |
1452 | int size; |
1453 | |
1454 | /* |
	 * We don't need to fix up if the PEBS assist is fault-like
1456 | */ |
1457 | if (!x86_pmu.intel_cap.pebs_trap) |
1458 | return 1; |
1459 | |
1460 | /* |
1461 | * No LBR entry, no basic block, no rewinding |
1462 | */ |
1463 | if (!cpuc->lbr_stack.nr || !from || !to) |
1464 | return 0; |
1465 | |
1466 | /* |
1467 | * Basic blocks should never cross user/kernel boundaries |
1468 | */ |
	if (kernel_ip(ip) != kernel_ip(to))
1470 | return 0; |
1471 | |
1472 | /* |
1473 | * unsigned math, either ip is before the start (impossible) or |
1474 | * the basic block is larger than 1 page (sanity) |
1475 | */ |
1476 | if ((ip - to) > PEBS_FIXUP_SIZE) |
1477 | return 0; |
1478 | |
1479 | /* |
1480 | * We sampled a branch insn, rewind using the LBR stack |
1481 | */ |
1482 | if (ip == to) { |
		set_linear_ip(regs, from);
1484 | return 1; |
1485 | } |
1486 | |
1487 | size = ip - to; |
1488 | if (!kernel_ip(ip)) { |
1489 | int bytes; |
1490 | u8 *buf = this_cpu_read(insn_buffer); |
1491 | |
1492 | /* 'size' must fit our buffer, see above */ |
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
1494 | if (bytes != 0) |
1495 | return 0; |
1496 | |
1497 | kaddr = buf; |
1498 | } else { |
1499 | kaddr = (void *)to; |
1500 | } |
1501 | |
1502 | do { |
1503 | struct insn insn; |
1504 | |
1505 | old_to = to; |
1506 | |
1507 | #ifdef CONFIG_X86_64 |
		is_64bit = kernel_ip(to) || any_64bit_mode(regs);
1509 | #endif |
		insn_init(&insn, kaddr, size, is_64bit);
1511 | |
1512 | /* |
1513 | * Make sure there was not a problem decoding the instruction. |
1514 | * This is doubly important because we have an infinite loop if |
1515 | * insn.length=0. |
1516 | */ |
		if (insn_get_length(&insn))
1518 | break; |
1519 | |
1520 | to += insn.length; |
1521 | kaddr += insn.length; |
1522 | size -= insn.length; |
1523 | } while (to < ip); |
1524 | |
1525 | if (to == ip) { |
		set_linear_ip(regs, old_to);
1527 | return 1; |
1528 | } |
1529 | |
1530 | /* |
1531 | * Even though we decoded the basic block, the instruction stream |
1532 | * never matched the given IP, either the TO or the IP got corrupted. |
1533 | */ |
1534 | return 0; |
1535 | } |
1536 | |
1537 | static inline u64 intel_get_tsx_weight(u64 tsx_tuning) |
1538 | { |
1539 | if (tsx_tuning) { |
1540 | union hsw_tsx_tuning tsx = { .value = tsx_tuning }; |
1541 | return tsx.cycles_last_block; |
1542 | } |
1543 | return 0; |
1544 | } |
1545 | |
1546 | static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax) |
1547 | { |
1548 | u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; |
1549 | |
1550 | /* For RTM XABORTs also log the abort code from AX */ |
1551 | if ((txn & PERF_TXN_TRANSACTION) && (ax & 1)) |
1552 | txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; |
1553 | return txn; |
1554 | } |
1555 | |
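/*
 * Bitmask of the counters this record applies to; formats before v4 keep
 * it in the record's status field, adaptive PEBS moved it into
 * struct pebs_basic.
 */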
1556 | static inline u64 get_pebs_status(void *n) |
1557 | { |
1558 | if (x86_pmu.intel_cap.pebs_format < 4) |
1559 | return ((struct pebs_record_nhm *)n)->status; |
1560 | return ((struct pebs_basic *)n)->applicable_counters; |
1561 | } |
1562 | |
1563 | #define PERF_X86_EVENT_PEBS_HSW_PREC \ |
1564 | (PERF_X86_EVENT_PEBS_ST_HSW | \ |
1565 | PERF_X86_EVENT_PEBS_LD_HSW | \ |
1566 | PERF_X86_EVENT_PEBS_NA_HSW) |
1567 | |
1568 | static u64 get_data_src(struct perf_event *event, u64 aux) |
1569 | { |
1570 | u64 val = PERF_MEM_NA; |
1571 | int fl = event->hw.flags; |
1572 | bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); |
1573 | |
	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
		val = load_latency_data(event, aux);
	else if (fl & PERF_X86_EVENT_PEBS_STLAT)
		val = store_latency_data(event, aux);
	else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID)
		val = x86_pmu.pebs_latency_data(event, aux);
	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
		val = precise_datala_hsw(event, aux);
	else if (fst)
		val = precise_store_data(aux);
1584 | return val; |
1585 | } |
1586 | |
1587 | static void setup_pebs_time(struct perf_event *event, |
1588 | struct perf_sample_data *data, |
1589 | u64 tsc) |
1590 | { |
1591 | /* Converting to a user-defined clock is not supported yet. */ |
1592 | if (event->attr.use_clockid != 0) |
1593 | return; |
1594 | |
1595 | /* |
1596 | * Doesn't support the conversion when the TSC is unstable. |
1597 | * The TSC unstable case is a corner case and very unlikely to |
1598 | * happen. If it happens, the TSC in a PEBS record will be |
1599 | * dropped and fall back to perf_event_clock(). |
1600 | */ |
1601 | if (!using_native_sched_clock() || !sched_clock_stable()) |
1602 | return; |
1603 | |
1604 | data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset; |
1605 | data->sample_flags |= PERF_SAMPLE_TIME; |
1606 | } |
1607 | |
1608 | #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ |
1609 | PERF_SAMPLE_PHYS_ADDR | \ |
1610 | PERF_SAMPLE_DATA_PAGE_SIZE) |
1611 | |
1612 | static void setup_pebs_fixed_sample_data(struct perf_event *event, |
1613 | struct pt_regs *iregs, void *__pebs, |
1614 | struct perf_sample_data *data, |
1615 | struct pt_regs *regs) |
1616 | { |
1617 | /* |
1618 | * We cast to the biggest pebs_record but are careful not to |
1619 | * unconditionally access the 'extra' entries. |
1620 | */ |
1621 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1622 | struct pebs_record_skl *pebs = __pebs; |
1623 | u64 sample_type; |
1624 | int fll; |
1625 | |
1626 | if (pebs == NULL) |
1627 | return; |
1628 | |
1629 | sample_type = event->attr.sample_type; |
1630 | fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; |
1631 | |
	perf_sample_data_init(data, 0, event->hw.last_period);
1633 | |
1634 | data->period = event->hw.last_period; |
1635 | |
1636 | /* |
1637 | * Use latency for weight (only avail with PEBS-LL) |
1638 | */ |
1639 | if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE)) { |
1640 | data->weight.full = pebs->lat; |
1641 | data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; |
1642 | } |
1643 | |
1644 | /* |
1645 | * data.data_src encodes the data source |
1646 | */ |
1647 | if (sample_type & PERF_SAMPLE_DATA_SRC) { |
		data->data_src.val = get_data_src(event, pebs->dse);
1649 | data->sample_flags |= PERF_SAMPLE_DATA_SRC; |
1650 | } |
1651 | |
1652 | /* |
1653 | * We must however always use iregs for the unwinder to stay sane; the |
1654 | * record BP,SP,IP can point into thin air when the record is from a |
1655 | * previous PMI context or an (I)RET happened between the record and |
1656 | * PMI. |
1657 | */ |
1658 | if (sample_type & PERF_SAMPLE_CALLCHAIN) |
		perf_sample_save_callchain(data, event, iregs);
1660 | |
1661 | /* |
1662 | * We use the interrupt regs as a base because the PEBS record does not |
1663 | * contain a full regs set, specifically it seems to lack segment |
1664 | * descriptors, which get used by things like user_mode(). |
1665 | * |
1666 | * In the simple case fix up only the IP for PERF_SAMPLE_IP. |
1667 | */ |
1668 | *regs = *iregs; |
1669 | |
1670 | /* |
	 * Initialize regs->flags from PEBS and
	 * clear the exact bit (which uses x86 EFLAGS reserved bit 3),
1673 | * i.e., do not rely on it being zero: |
1674 | */ |
1675 | regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT; |
1676 | |
1677 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
1678 | regs->ax = pebs->ax; |
1679 | regs->bx = pebs->bx; |
1680 | regs->cx = pebs->cx; |
1681 | regs->dx = pebs->dx; |
1682 | regs->si = pebs->si; |
1683 | regs->di = pebs->di; |
1684 | |
1685 | regs->bp = pebs->bp; |
1686 | regs->sp = pebs->sp; |
1687 | |
1688 | #ifndef CONFIG_X86_32 |
1689 | regs->r8 = pebs->r8; |
1690 | regs->r9 = pebs->r9; |
1691 | regs->r10 = pebs->r10; |
1692 | regs->r11 = pebs->r11; |
1693 | regs->r12 = pebs->r12; |
1694 | regs->r13 = pebs->r13; |
1695 | regs->r14 = pebs->r14; |
1696 | regs->r15 = pebs->r15; |
1697 | #endif |
1698 | } |
1699 | |
1700 | if (event->attr.precise_ip > 1) { |
1701 | /* |
1702 | * Haswell and later processors have an 'eventing IP' |
1703 | * (real IP) which fixes the off-by-1 skid in hardware. |
1704 | * Use it when precise_ip >= 2 : |
1705 | */ |
1706 | if (x86_pmu.intel_cap.pebs_format >= 2) { |
			set_linear_ip(regs, pebs->real_ip);
1708 | regs->flags |= PERF_EFLAGS_EXACT; |
1709 | } else { |
1710 | /* Otherwise, use PEBS off-by-1 IP: */ |
			set_linear_ip(regs, pebs->ip);
1712 | |
1713 | /* |
1714 | * With precise_ip >= 2, try to fix up the off-by-1 IP |
1715 | * using the LBR. If successful, the fixup function |
1716 | * corrects regs->ip and calls set_linear_ip() on regs: |
1717 | */ |
1718 | if (intel_pmu_pebs_fixup_ip(regs)) |
1719 | regs->flags |= PERF_EFLAGS_EXACT; |
1720 | } |
1721 | } else { |
1722 | /* |
1723 | * When precise_ip == 1, return the PEBS off-by-1 IP, |
1724 | * no fixup attempted: |
1725 | */ |
		set_linear_ip(regs, pebs->ip);
1727 | } |
1728 | |
1729 | |
1730 | if ((sample_type & PERF_SAMPLE_ADDR_TYPE) && |
1731 | x86_pmu.intel_cap.pebs_format >= 1) { |
1732 | data->addr = pebs->dla; |
1733 | data->sample_flags |= PERF_SAMPLE_ADDR; |
1734 | } |
1735 | |
1736 | if (x86_pmu.intel_cap.pebs_format >= 2) { |
1737 | /* Only set the TSX weight when no memory weight. */ |
1738 | if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll) { |
			data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning);
1740 | data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; |
1741 | } |
1742 | if (sample_type & PERF_SAMPLE_TRANSACTION) { |
			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
							      pebs->ax);
1745 | data->sample_flags |= PERF_SAMPLE_TRANSACTION; |
1746 | } |
1747 | } |
1748 | |
1749 | /* |
1750 | * v3 supplies an accurate time stamp, so we use that |
1751 | * for the time stamp. |
1752 | * |
1753 | * We can only do this for the default trace clock. |
1754 | */ |
1755 | if (x86_pmu.intel_cap.pebs_format >= 3) |
		setup_pebs_time(event, data, pebs->tsc);
1757 | |
1758 | if (has_branch_stack(event)) |
		perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL);
1760 | } |
1761 | |
1762 | static void adaptive_pebs_save_regs(struct pt_regs *regs, |
1763 | struct pebs_gprs *gprs) |
1764 | { |
1765 | regs->ax = gprs->ax; |
1766 | regs->bx = gprs->bx; |
1767 | regs->cx = gprs->cx; |
1768 | regs->dx = gprs->dx; |
1769 | regs->si = gprs->si; |
1770 | regs->di = gprs->di; |
1771 | regs->bp = gprs->bp; |
1772 | regs->sp = gprs->sp; |
1773 | #ifndef CONFIG_X86_32 |
1774 | regs->r8 = gprs->r8; |
1775 | regs->r9 = gprs->r9; |
1776 | regs->r10 = gprs->r10; |
1777 | regs->r11 = gprs->r11; |
1778 | regs->r12 = gprs->r12; |
1779 | regs->r13 = gprs->r13; |
1780 | regs->r14 = gprs->r14; |
1781 | regs->r15 = gprs->r15; |
1782 | #endif |
1783 | } |
1784 | |
1785 | #define PEBS_LATENCY_MASK 0xffff |
1786 | #define PEBS_CACHE_LATENCY_OFFSET 32 |
1787 | #define PEBS_RETIRE_LATENCY_OFFSET 32 |
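
/*
 * Field layout sketch (as consumed below): with PMU_FL_INSTR_LATENCY,
 * meminfo::latency packs the instruction latency in bits 15:0 and the
 * cache latency in bits 47:32; with PMU_FL_RETIRE_LATENCY, the retire
 * latency is carried in bits 47:32 of basic::format_size.
 */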
1788 | |
1789 | /* |
1790 | * With adaptive PEBS the layout depends on what fields are configured. |
1791 | */ |
1792 | |
1793 | static void setup_pebs_adaptive_sample_data(struct perf_event *event, |
1794 | struct pt_regs *iregs, void *__pebs, |
1795 | struct perf_sample_data *data, |
1796 | struct pt_regs *regs) |
1797 | { |
1798 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1799 | struct pebs_basic *basic = __pebs; |
1800 | void *next_record = basic + 1; |
1801 | u64 sample_type; |
1802 | u64 format_size; |
1803 | struct pebs_meminfo *meminfo = NULL; |
1804 | struct pebs_gprs *gprs = NULL; |
1805 | struct x86_perf_regs *perf_regs; |
1806 | |
1807 | if (basic == NULL) |
1808 | return; |
1809 | |
1810 | perf_regs = container_of(regs, struct x86_perf_regs, regs); |
1811 | perf_regs->xmm_regs = NULL; |
1812 | |
1813 | sample_type = event->attr.sample_type; |
1814 | format_size = basic->format_size; |
	perf_sample_data_init(data, 0, event->hw.last_period);
1816 | data->period = event->hw.last_period; |
1817 | |
	setup_pebs_time(event, data, basic->tsc);
1819 | |
1820 | /* |
1821 | * We must however always use iregs for the unwinder to stay sane; the |
1822 | * record BP,SP,IP can point into thin air when the record is from a |
1823 | * previous PMI context or an (I)RET happened between the record and |
1824 | * PMI. |
1825 | */ |
1826 | if (sample_type & PERF_SAMPLE_CALLCHAIN) |
		perf_sample_save_callchain(data, event, iregs);
1828 | |
1829 | *regs = *iregs; |
1830 | /* The ip in basic is EventingIP */ |
	set_linear_ip(regs, basic->ip);
1832 | regs->flags = PERF_EFLAGS_EXACT; |
1833 | |
1834 | if ((sample_type & PERF_SAMPLE_WEIGHT_STRUCT) && (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)) |
1835 | data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK; |
1836 | |
1837 | /* |
	 * The MEMINFO record sits in front of the GP record, but
	 * PERF_SAMPLE_TRANSACTION needs gprs->ax.
	 * Save the pointer here and process it later.
1841 | */ |
1842 | if (format_size & PEBS_DATACFG_MEMINFO) { |
1843 | meminfo = next_record; |
1844 | next_record = meminfo + 1; |
1845 | } |
1846 | |
1847 | if (format_size & PEBS_DATACFG_GP) { |
1848 | gprs = next_record; |
1849 | next_record = gprs + 1; |
1850 | |
1851 | if (event->attr.precise_ip < 2) { |
			set_linear_ip(regs, gprs->ip);
1853 | regs->flags &= ~PERF_EFLAGS_EXACT; |
1854 | } |
1855 | |
1856 | if (sample_type & PERF_SAMPLE_REGS_INTR) |
1857 | adaptive_pebs_save_regs(regs, gprs); |
1858 | } |
1859 | |
1860 | if (format_size & PEBS_DATACFG_MEMINFO) { |
1861 | if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { |
1862 | u64 weight = meminfo->latency; |
1863 | |
1864 | if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) { |
1865 | data->weight.var2_w = weight & PEBS_LATENCY_MASK; |
1866 | weight >>= PEBS_CACHE_LATENCY_OFFSET; |
1867 | } |
1868 | |
1869 | /* |
1870 | * Although meminfo::latency is defined as a u64, |
1871 | * only the lower 32 bits include the valid data |
1872 | * in practice on Ice Lake and earlier platforms. |
1873 | */ |
			if (sample_type & PERF_SAMPLE_WEIGHT) {
				data->weight.full = weight ?:
					intel_get_tsx_weight(meminfo->tsx_tuning);
			} else {
				data->weight.var1_dw = (u32)(weight & PEBS_LATENCY_MASK) ?:
					intel_get_tsx_weight(meminfo->tsx_tuning);
			}
1881 | data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; |
1882 | } |
1883 | |
1884 | if (sample_type & PERF_SAMPLE_DATA_SRC) { |
			data->data_src.val = get_data_src(event, meminfo->aux);
1886 | data->sample_flags |= PERF_SAMPLE_DATA_SRC; |
1887 | } |
1888 | |
1889 | if (sample_type & PERF_SAMPLE_ADDR_TYPE) { |
1890 | data->addr = meminfo->address; |
1891 | data->sample_flags |= PERF_SAMPLE_ADDR; |
1892 | } |
1893 | |
1894 | if (sample_type & PERF_SAMPLE_TRANSACTION) { |
			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
							      gprs ? gprs->ax : 0);
1897 | data->sample_flags |= PERF_SAMPLE_TRANSACTION; |
1898 | } |
1899 | } |
1900 | |
1901 | if (format_size & PEBS_DATACFG_XMMS) { |
1902 | struct pebs_xmm *xmm = next_record; |
1903 | |
1904 | next_record = xmm + 1; |
1905 | perf_regs->xmm_regs = xmm->xmm; |
1906 | } |
1907 | |
1908 | if (format_size & PEBS_DATACFG_LBRS) { |
1909 | struct lbr_entry *lbr = next_record; |
1910 | int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT) |
1911 | & 0xff) + 1; |
1912 | next_record = next_record + num_lbr * sizeof(struct lbr_entry); |
1913 | |
1914 | if (has_branch_stack(event)) { |
1915 | intel_pmu_store_pebs_lbrs(lbr); |
1916 | intel_pmu_lbr_save_brstack(data, cpuc, event); |
1917 | } |
1918 | } |
1919 | |
1920 | WARN_ONCE(next_record != __pebs + (format_size >> 48), |
1921 | "PEBS record size %llu, expected %llu, config %llx\n" , |
1922 | format_size >> 48, |
1923 | (u64)(next_record - __pebs), |
1924 | basic->format_size); |
1925 | } |
1926 | |
1927 | static inline void * |
1928 | get_next_pebs_record_by_bit(void *base, void *top, int bit) |
1929 | { |
1930 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1931 | void *at; |
1932 | u64 pebs_status; |
1933 | |
1934 | /* |
1935 | * fmt0 does not have a status bitfield (does not use |
	 * pebs_record_nhm format)
1937 | */ |
1938 | if (x86_pmu.intel_cap.pebs_format < 1) |
1939 | return base; |
1940 | |
1941 | if (base == NULL) |
1942 | return NULL; |
1943 | |
1944 | for (at = base; at < top; at += cpuc->pebs_record_size) { |
		unsigned long status = get_pebs_status(at);
1946 | |
1947 | if (test_bit(bit, (unsigned long *)&status)) { |
1948 | /* PEBS v3 has accurate status bits */ |
1949 | if (x86_pmu.intel_cap.pebs_format >= 3) |
1950 | return at; |
1951 | |
1952 | if (status == (1 << bit)) |
1953 | return at; |
1954 | |
1955 | /* clear non-PEBS bit and re-check */ |
1956 | pebs_status = status & cpuc->pebs_enabled; |
1957 | pebs_status &= PEBS_COUNTER_MASK; |
1958 | if (pebs_status == (1 << bit)) |
1959 | return at; |
1960 | } |
1961 | } |
1962 | return NULL; |
1963 | } |
1964 | |
1965 | void intel_pmu_auto_reload_read(struct perf_event *event) |
1966 | { |
1967 | WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)); |
1968 | |
	perf_pmu_disable(event->pmu);
	intel_pmu_drain_pebs_buffer();
	perf_pmu_enable(event->pmu);
1972 | } |
1973 | |
1974 | /* |
1975 | * Special variant of intel_pmu_save_and_restart() for auto-reload. |
1976 | */ |
1977 | static int |
1978 | intel_pmu_save_and_restart_reload(struct perf_event *event, int count) |
1979 | { |
1980 | struct hw_perf_event *hwc = &event->hw; |
1981 | int shift = 64 - x86_pmu.cntval_bits; |
1982 | u64 period = hwc->sample_period; |
1983 | u64 prev_raw_count, new_raw_count; |
1984 | s64 new, old; |
1985 | |
1986 | WARN_ON(!period); |
1987 | |
1988 | /* |
1989 | * drain_pebs() only happens when the PMU is disabled. |
1990 | */ |
1991 | WARN_ON(this_cpu_read(cpu_hw_events.enabled)); |
1992 | |
1993 | prev_raw_count = local64_read(&hwc->prev_count); |
1994 | rdpmcl(hwc->event_base_rdpmc, new_raw_count); |
1995 | local64_set(&hwc->prev_count, new_raw_count); |
1996 | |
1997 | /* |
1998 | * Since the counter increments a negative counter value and |
1999 | * overflows on the sign switch, giving the interval: |
2000 | * |
2001 | * [-period, 0] |
2002 | * |
2003 | * the difference between two consecutive reads is: |
2004 | * |
2005 | * A) value2 - value1; |
2006 | * when no overflows have happened in between, |
2007 | * |
2008 | * B) (0 - value1) + (value2 - (-period)); |
2009 | * when one overflow happened in between, |
2010 | * |
2011 | * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); |
2012 | * when @n overflows happened in between. |
2013 | * |
2014 | * Here A) is the obvious difference, B) is the extension to the |
2015 | * discrete interval, where the first term is to the top of the |
2016 | * interval and the second term is from the bottom of the next |
2017 | * interval and C) the extension to multiple intervals, where the |
2018 | * middle term is the whole intervals covered. |
2019 | * |
2020 | * An equivalent of C, by reduction, is: |
2021 | * |
2022 | * value2 - value1 + n * period |
2023 | */ |
2024 | new = ((s64)(new_raw_count << shift) >> shift); |
2025 | old = ((s64)(prev_raw_count << shift) >> shift); |
2026 | local64_add(new - old + count * period, &event->count); |
2027 | |
2028 | local64_set(&hwc->period_left, -new); |
2029 | |
2030 | perf_event_update_userpage(event); |
2031 | |
2032 | return 0; |
2033 | } |
2034 | |
2035 | static __always_inline void |
2036 | __intel_pmu_pebs_event(struct perf_event *event, |
2037 | struct pt_regs *iregs, |
2038 | struct perf_sample_data *data, |
2039 | void *base, void *top, |
2040 | int bit, int count, |
2041 | void (*setup_sample)(struct perf_event *, |
2042 | struct pt_regs *, |
2043 | void *, |
2044 | struct perf_sample_data *, |
2045 | struct pt_regs *)) |
2046 | { |
2047 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
2048 | struct hw_perf_event *hwc = &event->hw; |
2049 | struct x86_perf_regs perf_regs; |
2050 | struct pt_regs *regs = &perf_regs.regs; |
2051 | void *at = get_next_pebs_record_by_bit(base, top, bit); |
2052 | static struct pt_regs dummy_iregs; |
2053 | |
2054 | if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { |
2055 | /* |
2056 | * Now, auto-reload is only enabled in fixed period mode. |
2057 | * The reload value is always hwc->sample_period. |
2058 | * May need to change it, if auto-reload is enabled in |
2059 | * freq mode later. |
2060 | */ |
2061 | intel_pmu_save_and_restart_reload(event, count); |
2062 | } else if (!intel_pmu_save_and_restart(event)) |
2063 | return; |
2064 | |
2065 | if (!iregs) |
2066 | iregs = &dummy_iregs; |
2067 | |
2068 | while (count > 1) { |
2069 | setup_sample(event, iregs, at, data, regs); |
2070 | perf_event_output(event, data, regs); |
2071 | at += cpuc->pebs_record_size; |
		at = get_next_pebs_record_by_bit(at, top, bit);
2073 | count--; |
2074 | } |
2075 | |
2076 | setup_sample(event, iregs, at, data, regs); |
2077 | if (iregs == &dummy_iregs) { |
2078 | /* |
2079 | * The PEBS records may be drained in the non-overflow context, |
2080 | * e.g., large PEBS + context switch. Perf should treat the |
		 * last record the same as other PEBS records, and not
		 * invoke the generic overflow handler.
2083 | */ |
2084 | perf_event_output(event, data, regs); |
2085 | } else { |
2086 | /* |
2087 | * All but the last records are processed. |
2088 | * The last one is left to be able to call the overflow handler. |
2089 | */ |
2090 | if (perf_event_overflow(event, data, regs)) |
			x86_pmu_stop(event, 0);
2092 | } |
2093 | } |
2094 | |
2095 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data) |
2096 | { |
2097 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
2098 | struct debug_store *ds = cpuc->ds; |
2099 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ |
2100 | struct pebs_record_core *at, *top; |
2101 | int n; |
2102 | |
2103 | if (!x86_pmu.pebs_active) |
2104 | return; |
2105 | |
2106 | at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; |
2107 | top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; |
2108 | |
2109 | /* |
2110 | * Whatever else happens, drain the thing |
2111 | */ |
2112 | ds->pebs_index = ds->pebs_buffer_base; |
2113 | |
2114 | if (!test_bit(0, cpuc->active_mask)) |
2115 | return; |
2116 | |
2117 | WARN_ON_ONCE(!event); |
2118 | |
2119 | if (!event->attr.precise_ip) |
2120 | return; |
2121 | |
2122 | n = top - at; |
2123 | if (n <= 0) { |
2124 | if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) |
			intel_pmu_save_and_restart_reload(event, 0);
2126 | return; |
2127 | } |
2128 | |
	__intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
			       setup_pebs_fixed_sample_data);
2131 | } |
2132 | |
2133 | static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size) |
2134 | { |
2135 | struct perf_event *event; |
2136 | int bit; |
2137 | |
2138 | /* |
2139 | * The drain_pebs() could be called twice in a short period |
2140 | * for auto-reload event in pmu::read(). There are no |
2141 | * overflows have happened in between. |
2142 | * It needs to call intel_pmu_save_and_restart_reload() to |
2143 | * update the event->count for this case. |
2144 | */ |
2145 | for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { |
2146 | event = cpuc->events[bit]; |
2147 | if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) |
			intel_pmu_save_and_restart_reload(event, 0);
2149 | } |
2150 | } |
2151 | |
2152 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data) |
2153 | { |
2154 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
2155 | struct debug_store *ds = cpuc->ds; |
2156 | struct perf_event *event; |
2157 | void *base, *at, *top; |
2158 | short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; |
2159 | short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; |
2160 | int bit, i, size; |
2161 | u64 mask; |
2162 | |
2163 | if (!x86_pmu.pebs_active) |
2164 | return; |
2165 | |
2166 | base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; |
2167 | top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; |
2168 | |
2169 | ds->pebs_index = ds->pebs_buffer_base; |
2170 | |
2171 | mask = (1ULL << x86_pmu.max_pebs_events) - 1; |
2172 | size = x86_pmu.max_pebs_events; |
2173 | if (x86_pmu.flags & PMU_FL_PEBS_ALL) { |
2174 | mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED; |
2175 | size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed; |
2176 | } |
2177 | |
2178 | if (unlikely(base >= top)) { |
2179 | intel_pmu_pebs_event_update_no_drain(cpuc, size); |
2180 | return; |
2181 | } |
2182 | |
2183 | for (at = base; at < top; at += x86_pmu.pebs_record_size) { |
2184 | struct pebs_record_nhm *p = at; |
2185 | u64 pebs_status; |
2186 | |
2187 | pebs_status = p->status & cpuc->pebs_enabled; |
2188 | pebs_status &= mask; |
2189 | |
2190 | /* PEBS v3 has more accurate status bits */ |
2191 | if (x86_pmu.intel_cap.pebs_format >= 3) { |
2192 | for_each_set_bit(bit, (unsigned long *)&pebs_status, size) |
2193 | counts[bit]++; |
2194 | |
2195 | continue; |
2196 | } |
2197 | |
2198 | /* |
2199 | * On some CPUs the PEBS status can be zero when PEBS is |
2200 | * racing with clearing of GLOBAL_STATUS. |
2201 | * |
2202 | * Normally we would drop that record, but in the |
2203 | * case when there is only a single active PEBS event |
2204 | * we can assume it's for that event. |
2205 | */ |
2206 | if (!pebs_status && cpuc->pebs_enabled && |
2207 | !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) |
2208 | pebs_status = p->status = cpuc->pebs_enabled; |
2209 | |
		bit = find_first_bit((unsigned long *)&pebs_status,
				     x86_pmu.max_pebs_events);
2212 | if (bit >= x86_pmu.max_pebs_events) |
2213 | continue; |
2214 | |
2215 | /* |
2216 | * The PEBS hardware does not deal well with the situation |
2217 | * when events happen near to each other and multiple bits |
2218 | * are set. But it should happen rarely. |
2219 | * |
2220 | * If these events include one PEBS and multiple non-PEBS |
2221 | * events, it doesn't impact PEBS record. The record will |
2222 | * be handled normally. (slow path) |
2223 | * |
2224 | * If these events include two or more PEBS events, the |
2225 | * records for the events can be collapsed into a single |
2226 | * one, and it's not possible to reconstruct all events |
2227 | * that caused the PEBS record. It's called collision. |
2228 | * If collision happened, the record will be dropped. |
2229 | */ |
2230 | if (pebs_status != (1ULL << bit)) { |
2231 | for_each_set_bit(i, (unsigned long *)&pebs_status, size) |
2232 | error[i]++; |
2233 | continue; |
2234 | } |
2235 | |
2236 | counts[bit]++; |
2237 | } |
2238 | |
2239 | for_each_set_bit(bit, (unsigned long *)&mask, size) { |
2240 | if ((counts[bit] == 0) && (error[bit] == 0)) |
2241 | continue; |
2242 | |
2243 | event = cpuc->events[bit]; |
2244 | if (WARN_ON_ONCE(!event)) |
2245 | continue; |
2246 | |
2247 | if (WARN_ON_ONCE(!event->attr.precise_ip)) |
2248 | continue; |
2249 | |
2250 | /* log dropped samples number */ |
2251 | if (error[bit]) { |
			perf_log_lost_samples(event, error[bit]);
2253 | |
2254 | if (iregs && perf_event_account_interrupt(event)) |
				x86_pmu_stop(event, 0);
2256 | } |
2257 | |
2258 | if (counts[bit]) { |
			__intel_pmu_pebs_event(event, iregs, data, base,
					       top, bit, counts[bit],
					       setup_pebs_fixed_sample_data);
2262 | } |
2263 | } |
2264 | } |
2265 | |
2266 | static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data) |
2267 | { |
2268 | short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; |
2269 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
2270 | int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events); |
2271 | int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); |
2272 | struct debug_store *ds = cpuc->ds; |
2273 | struct perf_event *event; |
2274 | void *base, *at, *top; |
2275 | int bit, size; |
2276 | u64 mask; |
2277 | |
2278 | if (!x86_pmu.pebs_active) |
2279 | return; |
2280 | |
2281 | base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; |
2282 | top = (struct pebs_basic *)(unsigned long)ds->pebs_index; |
2283 | |
2284 | ds->pebs_index = ds->pebs_buffer_base; |
2285 | |
2286 | mask = ((1ULL << max_pebs_events) - 1) | |
2287 | (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED); |
2288 | size = INTEL_PMC_IDX_FIXED + num_counters_fixed; |
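	/*
	 * Example (hypothetical counts): 8 GP counters and 4 fixed
	 * counters give mask = 0xf000000ff (fixed bits start at
	 * INTEL_PMC_IDX_FIXED, i.e. bit 32) and size = 36 bit positions
	 * to scan.
	 */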
2289 | |
2290 | if (unlikely(base >= top)) { |
2291 | intel_pmu_pebs_event_update_no_drain(cpuc, size); |
2292 | return; |
2293 | } |
2294 | |
2295 | for (at = base; at < top; at += cpuc->pebs_record_size) { |
2296 | u64 pebs_status; |
2297 | |
		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
2299 | pebs_status &= mask; |
2300 | |
2301 | for_each_set_bit(bit, (unsigned long *)&pebs_status, size) |
2302 | counts[bit]++; |
2303 | } |
2304 | |
2305 | for_each_set_bit(bit, (unsigned long *)&mask, size) { |
2306 | if (counts[bit] == 0) |
2307 | continue; |
2308 | |
2309 | event = cpuc->events[bit]; |
2310 | if (WARN_ON_ONCE(!event)) |
2311 | continue; |
2312 | |
2313 | if (WARN_ON_ONCE(!event->attr.precise_ip)) |
2314 | continue; |
2315 | |
		__intel_pmu_pebs_event(event, iregs, data, base,
				       top, bit, counts[bit],
				       setup_pebs_adaptive_sample_data);
2319 | } |
2320 | } |
2321 | |
2322 | /* |
2323 | * BTS, PEBS probe and setup |
2324 | */ |
2325 | |
2326 | void __init intel_ds_init(void) |
2327 | { |
2328 | /* |
	 * No support for 32-bit formats
2330 | */ |
2331 | if (!boot_cpu_has(X86_FEATURE_DTES64)) |
2332 | return; |
2333 | |
2334 | x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); |
2335 | x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); |
2336 | x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; |
2337 | if (x86_pmu.version <= 4) |
2338 | x86_pmu.pebs_no_isolation = 1; |
2339 | |
2340 | if (x86_pmu.pebs) { |
2341 | char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; |
2342 | char *pebs_qual = "" ; |
2343 | int format = x86_pmu.intel_cap.pebs_format; |
2344 | |
2345 | if (format < 4) |
2346 | x86_pmu.intel_cap.pebs_baseline = 0; |
2347 | |
2348 | switch (format) { |
2349 | case 0: |
2350 | pr_cont("PEBS fmt0%c, " , pebs_type); |
2351 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); |
2352 | /* |
2353 | * Using >PAGE_SIZE buffers makes the WRMSR to |
2354 | * PERF_GLOBAL_CTRL in intel_pmu_enable_all() |
2355 | * mysteriously hang on Core2. |
2356 | * |
2357 | * As a workaround, we don't do this. |
2358 | */ |
2359 | x86_pmu.pebs_buffer_size = PAGE_SIZE; |
2360 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; |
2361 | break; |
2362 | |
2363 | case 1: |
2364 | pr_cont("PEBS fmt1%c, " , pebs_type); |
2365 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); |
2366 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
2367 | break; |
2368 | |
2369 | case 2: |
2370 | pr_cont("PEBS fmt2%c, " , pebs_type); |
2371 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw); |
2372 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
2373 | break; |
2374 | |
2375 | case 3: |
2376 | pr_cont("PEBS fmt3%c, " , pebs_type); |
2377 | x86_pmu.pebs_record_size = |
2378 | sizeof(struct pebs_record_skl); |
2379 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
2380 | x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; |
2381 | break; |
2382 | |
2383 | case 5: |
2384 | x86_pmu.pebs_ept = 1; |
2385 | fallthrough; |
2386 | case 4: |
2387 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl; |
2388 | x86_pmu.pebs_record_size = sizeof(struct pebs_basic); |
2389 | if (x86_pmu.intel_cap.pebs_baseline) { |
2390 | x86_pmu.large_pebs_flags |= |
2391 | PERF_SAMPLE_BRANCH_STACK | |
2392 | PERF_SAMPLE_TIME; |
2393 | x86_pmu.flags |= PMU_FL_PEBS_ALL; |
2394 | x86_pmu.pebs_capable = ~0ULL; |
2395 | pebs_qual = "-baseline" ; |
2396 | x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; |
2397 | } else { |
2398 | /* Only basic record supported */ |
2399 | x86_pmu.large_pebs_flags &= |
2400 | ~(PERF_SAMPLE_ADDR | |
2401 | PERF_SAMPLE_TIME | |
2402 | PERF_SAMPLE_DATA_SRC | |
2403 | PERF_SAMPLE_TRANSACTION | |
2404 | PERF_SAMPLE_REGS_USER | |
2405 | PERF_SAMPLE_REGS_INTR); |
2406 | } |
2407 | pr_cont("PEBS fmt4%c%s, " , pebs_type, pebs_qual); |
2408 | |
2409 | if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) { |
2410 | pr_cont("PEBS-via-PT, " ); |
2411 | x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT; |
2412 | } |
2413 | |
2414 | break; |
2415 | |
2416 | default: |
2417 | pr_cont("no PEBS fmt%d%c, " , format, pebs_type); |
2418 | x86_pmu.pebs = 0; |
2419 | } |
2420 | } |
2421 | } |
2422 | |
2423 | void perf_restore_debug_store(void) |
2424 | { |
2425 | struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); |
2426 | |
2427 | if (!x86_pmu.bts && !x86_pmu.pebs) |
2428 | return; |
2429 | |
	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
2431 | } |
2432 | |