// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: give a precise performance counter summary
 * of any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321  task-clock                #   11.037 CPUs utilized
            41,190  context-switches          #    0.024 M/sec
             6,735  CPU-migrations            #    0.004 M/sec
            17,318  page-faults               #    0.010 M/sec
     5,205,202,243  cycles                    #    3.046 GHz
     3,856,436,920  stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871  stalled-cycles-backend    #   30.75% backend cycles idle
     2,603,501,247  instructions              #    0.50  insns per cycle
                                              #    1.48  stalled cycles per insn
       484,357,498  branches                  #  283.455 M/sec
         6,388,934  branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

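  An illustrative interval-mode invocation (hypothetical example; the exact
  column layout varies by perf version and the events selected):

   $ perf stat -I 1000 -e cycles,instructions -a sleep 3
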
 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */

#include "builtin.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmus.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/util.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/evlist.h>
#include <internal/threadmap.h>

#define DEFAULT_SEPARATOR	" "
#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"

static void print_counters(struct timespec *ts, int argc, const char **argv);

static struct evlist *evsel_list;
static struct parse_events_option_args parse_events_option_args = {
	.evlistp = &evsel_list,
};

static bool all_counters_use_bpf = true;

static struct target target = {
	.uid	= UINT_MAX,
};

#define METRIC_ONLY_LEN 20

static volatile sig_atomic_t	child_pid		= -1;
static int			detailed_run		= 0;
static bool			transaction_run;
static bool			topdown_run		= false;
static bool			smi_cost		= false;
static bool			smi_reset		= false;
static int			big_num_opt		= -1;
static const char		*pre_cmd		= NULL;
static const char		*post_cmd		= NULL;
static bool			sync_run		= false;
static bool			forever			= false;
static bool			force_metric_only	= false;
static struct timespec		ref_time;
static bool			append_file;
static bool			interval_count;
static const char		*output_name;
static int			output_fd;
static char			*metrics;

struct perf_stat {
	bool			 record;
	struct perf_data	 data;
	struct perf_session	*session;
	u64			 bytes_written;
	struct perf_tool	 tool;
	bool			 maps_allocated;
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;
	enum aggr_mode		 aggr_mode;
	u32			 aggr_level;
};

static struct perf_stat		perf_stat;
#define STAT_RECORD		perf_stat.record

static volatile sig_atomic_t done = 0;

static struct perf_stat_config stat_config = {
	.aggr_mode		= AGGR_GLOBAL,
	.aggr_level		= MAX_CACHE_LVL + 1,
	.scale			= true,
	.unit_width		= 4, /* strlen("unit") */
	.run_count		= 1,
	.metric_only_len	= METRIC_ONLY_LEN,
	.walltime_nsecs_stats	= &walltime_nsecs_stats,
	.ru_stats		= &ru_stats,
	.big_num		= true,
	.ctl_fd			= -1,
	.ctl_fd_ack		= -1,
	.iostat_run		= false,
};

static bool cpus_map_matched(struct evsel *a, struct evsel *b)
{
	if (!a->core.cpus && !b->core.cpus)
		return true;

	if (!a->core.cpus || !b->core.cpus)
		return false;

	if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
		return false;

	for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
		if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
		    perf_cpu_map__cpu(b->core.cpus, i).cpu)
			return false;
	}

	return true;
}

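/*
 * Group members must share their leader's CPU map; any member whose map
 * differs is removed from its group so that it can be opened standalone.
 */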
static void evlist__check_cpu_maps(struct evlist *evlist)
{
	struct evsel *evsel, *warned_leader = NULL;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel *leader = evsel__leader(evsel);

		/* Check that leader matches cpus with each member. */
		if (leader == evsel)
			continue;
		if (cpus_map_matched(leader, evsel))
			continue;

		/* If there's mismatch disable the group and warn user. */
		if (warned_leader != leader) {
			char buf[200];

			pr_warning("WARNING: grouped events cpus do not match.\n"
				   "Events with CPUs not matching the leader will "
				   "be removed from the group.\n");
			evsel__group_desc(leader, buf, sizeof(buf));
			pr_warning("  %s\n", buf);
			warned_leader = leader;
		}
		if (verbose > 0) {
			char buf[200];

			cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", leader->name, buf);
			cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", evsel->name, buf);
		}

		evsel__remove_from_group(evsel, leader);
	}
}

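/*
 * Store the normalized difference a - b in r, borrowing a second when
 * the nanosecond field underflows (assumes a >= b).
 */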
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}

static void perf_stat__reset_stats(void)
{
	evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}

static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}

static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

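/* Look up the perf_sample_id slot of evsel e at (cpu map index x, thread y). */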
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
				   struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
	struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}

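/*
 * Tool events (duration_time, user_time, system_time) are synthesized from
 * the tool's own clock and rusage measurements rather than read from the
 * kernel; everything else goes through evsel__read_counter().
 */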
static int read_single_counter(struct evsel *counter, int cpu_map_idx,
			       int thread, struct timespec *rs)
{
	switch (counter->tool_event) {
	case PERF_TOOL_DURATION_TIME: {
		u64 val = rs->tv_nsec + rs->tv_sec * 1000000000ULL;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	case PERF_TOOL_USER_TIME:
	case PERF_TOOL_SYSTEM_TIME: {
		u64 val;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		if (counter->tool_event == PERF_TOOL_USER_TIME)
			val = ru_stats.ru_utime_usec_stat.mean;
		else
			val = ru_stats.ru_stime_usec_stat.mean;
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	default:
	case PERF_TOOL_NONE:
		return evsel__read_counter(counter, cpu_map_idx, thread);
	case PERF_TOOL_MAX:
		/* This should never be reached */
		return 0;
	}
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
{
	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
	int thread;

	if (!counter->supported)
		return -ENOENT;

	for (thread = 0; thread < nthreads; thread++) {
		struct perf_counts_values *count;

		count = perf_counts(counter->counts, cpu_map_idx, thread);

		/*
		 * The leader's group read loads data into its group members
		 * (via evsel__read_counter()) and sets their count->loaded.
		 */
		if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
		    read_single_counter(counter, cpu_map_idx, thread, rs)) {
			counter->counts->scaled = -1;
			perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
			perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
			return -1;
		}

		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);

		if (STAT_RECORD) {
			if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
				pr_err("failed to write stat event\n");
				return -1;
			}
		}

		if (verbose > 1) {
			fprintf(stat_config.output,
				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
				evsel__name(counter),
				perf_cpu_map__cpu(evsel__cpus(counter),
						  cpu_map_idx).cpu,
				count->val, count->ena, count->run);
		}
	}

	return 0;
}

static int read_affinity_counters(struct timespec *rs)
{
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity;

	if (all_counters_use_bpf)
		return 0;

	if (!target__has_cpu(&target) || target__has_per_thread(&target))
		affinity = NULL;
	else if (affinity__setup(&saved_affinity) < 0)
		return -1;
	else
		affinity = &saved_affinity;

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		struct evsel *counter = evlist_cpu_itr.evsel;

		if (evsel__is_bpf(counter))
			continue;

		if (!counter->err) {
			counter->err = read_counter_cpu(counter, rs,
							evlist_cpu_itr.cpu_map_idx);
		}
	}
	if (affinity)
		affinity__cleanup(&saved_affinity);

	return 0;
}

static int read_bpf_map_counters(void)
{
	struct evsel *counter;
	int err;

	evlist__for_each_entry(evsel_list, counter) {
		if (!evsel__is_bpf(counter))
			continue;

		err = bpf_counter__read(counter);
		if (err)
			return err;
	}
	return 0;
}

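/*
 * Read all counters for this round: BPF-backed counters first, then the
 * regular ones via affinity-friendly per-CPU iteration.
 */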
static int read_counters(struct timespec *rs)
{
	if (!stat_config.stop_read_counter) {
		if (read_bpf_map_counters() ||
		    read_affinity_counters(rs))
			return -1;
	}
	return 0;
}

static void process_counters(void)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		if (counter->err)
			pr_debug("failed to read counter %s\n", counter->name);
		if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
		counter->err = 0;
	}

	perf_stat_merge_counters(&stat_config, evsel_list);
	perf_stat_process_percore(&stat_config, evsel_list);
}

static void process_interval(void)
{
	struct timespec ts, rs;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	evlist__reset_aggr_stats(evsel_list);

	if (read_counters(&rs) == 0)
		process_counters();

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	init_stats(&walltime_nsecs_stats);
	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
	print_counters(&rs, 0, NULL);
}

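/* Run one -I interval; returns true once --interval-count rounds are used up. */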
static bool handle_interval(unsigned int interval, int *times)
{
	if (interval) {
		process_interval();
		if (interval_count && !(--(*times)))
			return true;
	}
	return false;
}

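/*
 * Enable all counters: BPF-backed ones explicitly, the rest through
 * evlist__enable() unless they were configured to enable on exec.
 */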
static int enable_counters(void)
{
	struct evsel *evsel;
	int err;

	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel__is_bpf(evsel))
			continue;

		err = bpf_counter__enable(evsel);
		if (err)
			return err;
	}

	if (!target__enable_on_exec(&target)) {
		if (!all_counters_use_bpf)
			evlist__enable(evsel_list);
	}
	return 0;
}

static void disable_counters(void)
{
	struct evsel *counter;

	/*
	 * If we don't have a tracee (i.e. we're attaching to an existing task
	 * or CPU), the counters may still be running. To get accurate group
	 * ratios, we must stop groups from counting before reading their
	 * constituent counters.
	 */
	if (!target__none(&target)) {
		evlist__for_each_entry(evsel_list, counter)
			bpf_counter__disable(counter);
		if (!all_counters_use_bpf)
			evlist__disable(evsel_list);
	}
}

static volatile sig_atomic_t workload_exec_errno;

/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

static bool evsel__should_store_id(struct evsel *counter)
{
	return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}

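/*
 * With -p/-t the monitored tasks can exit underneath us; probe /proc/<pid>
 * for each thread and report whether at least one is still alive.
 */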
static bool is_target_alive(struct target *_target,
			    struct perf_thread_map *threads)
{
	struct stat st;
	int i;

	if (!target__has_task(_target))
		return true;

	for (i = 0; i < threads->nr; i++) {
		char path[PATH_MAX];

		scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
			  threads->map[i].pid);

		if (!stat(path, &st))
			return true;
	}

	return false;
}

static void process_evlist(struct evlist *evlist, unsigned int interval)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
		switch (cmd) {
		case EVLIST_CTL_CMD_ENABLE:
			fallthrough;
		case EVLIST_CTL_CMD_DISABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_SNAPSHOT:
		case EVLIST_CTL_CMD_ACK:
		case EVLIST_CTL_CMD_UNSUPPORTED:
		case EVLIST_CTL_CMD_EVLIST:
		case EVLIST_CTL_CMD_STOP:
		case EVLIST_CTL_CMD_PING:
		default:
			break;
		}
	}
}

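/* Shrink the remaining poll budget by the time already spent processing. */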
static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
			int *time_to_sleep)
{
	int tts = *time_to_sleep;
	struct timespec time_diff;

	diff_timespec(&time_diff, time_stop, time_start);

	tts -= time_diff.tv_sec * MSEC_PER_SEC +
	       time_diff.tv_nsec / NSEC_PER_MSEC;

	if (tts < 0)
		tts = 0;

	*time_to_sleep = tts;
}

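/*
 * Wait for the workload (or attached target) to finish, waking up every
 * sleep_time ms to service -I intervals, --timeout and control-fd commands.
 * Returns the waitpid() status when a workload was forked, else 0.
 */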
static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
	int child_exited = 0, status = 0;
	int time_to_sleep, sleep_time;
	struct timespec time_start, time_stop;

	if (interval)
		sleep_time = interval;
	else if (timeout)
		sleep_time = timeout;
	else
		sleep_time = 1000;

	time_to_sleep = sleep_time;

	while (!done) {
		if (forks)
			child_exited = waitpid(child_pid, &status, WNOHANG);
		else
			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

		if (child_exited)
			break;

		clock_gettime(CLOCK_MONOTONIC, &time_start);
		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
			if (timeout || handle_interval(interval, times))
				break;
			time_to_sleep = sleep_time;
		} else { /* fd revent */
			process_evlist(evsel_list, interval);
			clock_gettime(CLOCK_MONOTONIC, &time_stop);
			compute_tts(&time_start, &time_stop, &time_to_sleep);
		}
	}

	return status;
}

enum counter_recovery {
	COUNTER_SKIP,
	COUNTER_RETRY,
	COUNTER_FATAL,
};

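/*
 * Decide how to recover from a failed event open: skip events the kernel
 * cannot support, retry after evsel__fallback() picked an alternative
 * configuration, or give up and tear the run down.
 */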
static enum counter_recovery stat_handle_error(struct evsel *counter)
{
	char msg[BUFSIZ];
	/*
	 * PPC returns ENXIO for HW counters until 2.6.37
	 * (behavior changed with commit b0a873e).
	 */
	if (errno == EINVAL || errno == ENOSYS ||
	    errno == ENOENT || errno == EOPNOTSUPP ||
	    errno == ENXIO) {
		if (verbose > 0)
			ui__warning("%s event is not supported by the kernel.\n",
				    evsel__name(counter));
		counter->supported = false;
		/*
		 * errored is a sticky flag that means one of the counter's
		 * cpu event had a problem and needs to be reexamined.
		 */
		counter->errored = true;

		if ((evsel__leader(counter) != counter) ||
		    !(counter->core.leader->nr_members > 1))
			return COUNTER_SKIP;
	} else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
		if (verbose > 0)
			ui__warning("%s\n", msg);
		return COUNTER_RETRY;
	} else if (target__has_per_thread(&target) &&
		   evsel_list->core.threads &&
		   evsel_list->core.threads->err_thread != -1) {
		/*
		 * For global --per-thread case, skip current
		 * error thread.
		 */
		if (!thread_map__remove(evsel_list->core.threads,
					evsel_list->core.threads->err_thread)) {
			evsel_list->core.threads->err_thread = -1;
			return COUNTER_RETRY;
		}
	} else if (counter->skippable) {
		if (verbose > 0)
			ui__warning("skipping event %s that kernel failed to open.\n",
				    evsel__name(counter));
		counter->supported = false;
		counter->errored = true;
		return COUNTER_SKIP;
	}

	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
	ui__error("%s\n", msg);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);
	return COUNTER_FATAL;
}

static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
	int interval = stat_config.interval;
	int times = stat_config.times;
	int timeout = stat_config.timeout;
	char msg[BUFSIZ];
	unsigned long long t0, t1;
	struct evsel *counter;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	int err;
	bool second_pass = false;

	if (forks) {
		if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return -1;
		affinity = &saved_affinity;
	}

	evlist__for_each_entry(evsel_list, counter) {
		counter->reset_group = false;
		if (bpf_counter__load(counter, &target))
			return -1;
		if (!(evsel__is_bperf(counter)))
			all_counters_use_bpf = false;
	}

	evlist__reset_aggr_stats(evsel_list);

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		counter = evlist_cpu_itr.evsel;

		/*
		 * bperf calls evsel__open_per_cpu() in bperf__load(), so
		 * no need to call it again here.
		 */
		if (target.use_bpf)
			break;

		if (counter->reset_group || counter->errored)
			continue;
		if (evsel__is_bperf(counter))
			continue;
try_again:
		if (create_perf_stat_counter(counter, &stat_config, &target,
					     evlist_cpu_itr.cpu_map_idx) < 0) {

			/*
			 * Weak group failed. We cannot just undo this here
			 * because earlier CPUs might be in group mode, and the kernel
			 * doesn't support mixing group and non group reads. Defer
			 * it to later.
			 * Don't close here because we're in the wrong affinity.
			 */
			if ((errno == EINVAL || errno == EBADF) &&
			    evsel__leader(counter) != counter &&
			    counter->weak_group) {
				evlist__reset_weak_group(evsel_list, counter, false);
				assert(counter->reset_group);
				second_pass = true;
				continue;
			}

			switch (stat_handle_error(counter)) {
			case COUNTER_FATAL:
				return -1;
			case COUNTER_RETRY:
				goto try_again;
			case COUNTER_SKIP:
				continue;
			default:
				break;
			}

		}
		counter->supported = true;
	}

	if (second_pass) {
		/*
		 * Now redo all the weak group after closing them,
		 * and also close errored counters.
		 */

		/* First close errored or weak retry */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group && !counter->errored)
				continue;

			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
		}
		/* Now reopen weak */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group)
				continue;
try_again_reset:
			pr_debug2("reopening weak %s\n", evsel__name(counter));
			if (create_perf_stat_counter(counter, &stat_config, &target,
						     evlist_cpu_itr.cpu_map_idx) < 0) {

				switch (stat_handle_error(counter)) {
				case COUNTER_FATAL:
					return -1;
				case COUNTER_RETRY:
					goto try_again_reset;
				case COUNTER_SKIP:
					continue;
				default:
					break;
				}
			}
			counter->supported = true;
		}
	}
	affinity__cleanup(affinity);

	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported) {
			perf_evsel__free_fd(&counter->core);
			continue;
		}

		l = strlen(counter->unit);
		if (l > stat_config.unit_width)
			stat_config.unit_width = l;

		if (evsel__should_store_id(counter) &&
		    evsel__store_ids(counter, evsel_list))
			return -1;
	}

	if (evlist__apply_filters(evsel_list, &counter)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}

	if (STAT_RECORD) {
		int fd = perf_data__fd(&perf_stat.data);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			return err;

		err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
							 process_synthesized_event, is_pipe);
		if (err < 0)
			return err;
	}

	if (target.initial_delay) {
		pr_info(EVLIST_DISABLED_MSG);
	} else {
		err = enable_counters();
		if (err)
			return -1;
	}

	/* Exec the command, if any */
	if (forks)
		evlist__start_workload(evsel_list);

	if (target.initial_delay > 0) {
		usleep(target.initial_delay * USEC_PER_MSEC);
		err = enable_counters();
		if (err)
			return -1;

		pr_info(EVLIST_ENABLED_MSG);
	}

	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
			status = dispatch_events(forks, timeout, interval, &times);
		if (child_pid != -1) {
			if (timeout)
				kill(child_pid, SIGTERM);
			wait4(child_pid, &status, 0, &stat_config.ru_data);
		}

		if (workload_exec_errno) {
			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		status = dispatch_events(forks, timeout, interval, &times);
	}

	disable_counters();

	t1 = rdclock();

	if (stat_config.walltime_run_table)
		stat_config.walltime_run[run_idx] = t1 - t0;

	if (interval && stat_config.summary) {
		stat_config.interval = 0;
		stat_config.stop_read_counter = true;
		init_stats(&walltime_nsecs_stats);
		update_stats(&walltime_nsecs_stats, t1 - t0);

		evlist__copy_prev_raw_counts(evsel_list);
		evlist__reset_prev_raw_counts(evsel_list);
		evlist__reset_aggr_stats(evsel_list);
	} else {
		update_stats(&walltime_nsecs_stats, t1 - t0);
		update_rusage_stats(&ru_stats, &stat_config.ru_data);
	}

	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
		process_counters();

	/*
	 * For STAT_RECORD we must keep evsel_list alive: it is still
	 * processed while the session is written out, and is closed later.
	 */
	if (!STAT_RECORD)
		evlist__close(evsel_list);

	return WEXITSTATUS(status);
}

static int run_perf_stat(int argc, const char **argv, int run_idx)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv, run_idx);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	/* Do not print anything if we record to the pipe. */
	if (STAT_RECORD && perf_stat.data.is_pipe)
		return;
	if (quiet)
		return;

	evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}

static volatile sig_atomic_t signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * Render child_pid harmless: don't send SIGTERM to a random
	 * process in case of a race with fast PID recycling.
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * Avoid a race with the SIGCHLD handler in skip_signal(), which
	 * modifies child_pid: the goal is to avoid sending SIGTERM to a
	 * random process.
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

void perf_stat__set_big_num(int set)
{
	stat_config.big_num = (set != 0);
}

void perf_stat__set_no_csv_summary(int set)
{
	stat_config.no_csv_summary = (set != 0);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	perf_stat__set_big_num(!unset);
	return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
			      const char *s __maybe_unused, int unset)
{
	force_metric_only = true;
	stat_config.metric_only = !unset;
	return 0;
}

static int append_metric_groups(const struct option *opt __maybe_unused,
				const char *str,
				int unset __maybe_unused)
{
	if (metrics) {
		char *tmp;

		if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
			return -ENOMEM;
		free(metrics);
		metrics = tmp;
	} else {
		metrics = strdup(str);
		if (!metrics)
			return -ENOMEM;
	}
	return 0;
}

static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct perf_stat_config *config = opt->value;

	return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}

static int parse_stat_cgroups(const struct option *opt,
			      const char *str, int unset)
{
	if (stat_config.cgroup_list) {
		pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
		return -1;
	}

	return parse_cgroups(opt, str, unset);
}

static int parse_cputype(const struct option *opt,
			 const char *str,
			 int unset __maybe_unused)
{
	const struct perf_pmu *pmu;
	struct evlist *evlist = *(struct evlist **)opt->value;

	if (!list_empty(&evlist->core.entries)) {
		fprintf(stderr, "Must define cputype before events/metrics\n");
		return -1;
	}

	pmu = perf_pmus__pmu_for_pmu_filter(str);
	if (!pmu) {
		fprintf(stderr, "--cputype %s is not supported!\n", str);
		return -1;
	}
	parse_events_option_args.pmu_filter = pmu->name;

	return 0;
}

static int parse_cache_level(const struct option *opt,
			     const char *str,
			     int unset __maybe_unused)
{
	int level;
	u32 *aggr_mode = (u32 *)opt->value;
	u32 *aggr_level = (u32 *)opt->data;

	/*
	 * If no string is specified, aggregate based on the topology of
	 * Last Level Cache (LLC). Since the LLC level can change from
	 * architecture to architecture, set level greater than
	 * MAX_CACHE_LVL which will be interpreted as LLC.
	 */
	if (str == NULL) {
		level = MAX_CACHE_LVL + 1;
		goto out;
	}

	/*
	 * The format to specify cache level is LX or lX where X is the
	 * cache level.
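	 * For example, "L2" or "l2" selects level 2; a bare --per-cache uses the LLC.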
	 */
	if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) {
		pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
		       MAX_CACHE_LVL,
		       MAX_CACHE_LVL);
		return -EINVAL;
	}

	level = atoi(&str[1]);
	if (level < 1) {
		pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
		       MAX_CACHE_LVL,
		       MAX_CACHE_LVL);
		return -EINVAL;
	}

	if (level > MAX_CACHE_LVL) {
		pr_err("perf only supports max cache level of %d.\n"
		       "Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL);
		return -EINVAL;
	}
out:
	*aggr_mode = AGGR_CACHE;
	*aggr_level = level;
	return 0;
}

static struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
	OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
		   "stat events on existing bpf program id"),
	OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
		    "use bpf program to count events"),
	OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
		   "path to perf_event_attr map"),
#endif
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN(0, "scale", &stat_config.scale,
		    "Use --no-scale to disable counter scaling for multiplexing"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &stat_config.run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
		    "display details about each run (only with -r option)"),
	OPT_BOOLEAN('n', "null", &stat_config.null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		     "disable aggregation across CPUs or PMUs", AGGR_NONE),
	OPT_SET_UINT(0, "no-merge", &stat_config.aggr_mode,
		     "disable aggregation the same as -A or --no-aggr", AGGR_NONE),
	OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
		    "Merge identical named hybrid events"),
	OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
		    "print counts in JSON format"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_stat_cgroups),
	OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
		   "expand events for each cgroup"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		     "print counts at regular interval in ms "
		     "(overhead is possible for values <= 100ms)"),
	OPT_INTEGER(0, "interval-count", &stat_config.times,
		    "print counts for fixed number of times"),
	OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
		    "clear screen in between new interval"),
	OPT_UINTEGER(0, "timeout", &stat_config.timeout,
		     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_SET_UINT(0, "per-cluster", &stat_config.aggr_mode,
		     "aggregate counts per processor cluster", AGGR_CLUSTER),
	OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level,
			    "cache level", "aggregate count at this cache level (Default: LLC)",
			    parse_cache_level),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
		     "aggregate counts per numa node", AGGR_NODE),
	OPT_INTEGER('D', "delay", &target.initial_delay,
		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
			   "Only print computed metrics. No raw values", enable_metric_only),
	OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
		    "don't group metric events, impacts multiplexing"),
	OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
		    "don't try to share events between metrics in a group"),
	OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
		    "disable adding events for the metric threshold calculation"),
	OPT_BOOLEAN(0, "topdown", &topdown_run,
		    "measure top-down statistics"),
	OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
		     "Set the metrics level for the top-down statistics (0: max level)"),
	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
		    "measure SMI cost"),
	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
		     "monitor specified metrics or metric groups (separated by ,)",
		     append_metric_groups),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
		    "Use with 'percore' event qualifier to show the event "
		    "counts of one hardware thread by sum up total hardware "
		    "threads of same physical core"),
	OPT_BOOLEAN(0, "summary", &stat_config.summary,
		    "print summary for interval mode"),
	OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
		    "don't print 'summary' for CSV summary output"),
	OPT_BOOLEAN(0, "quiet", &quiet,
		    "don't print any output, messages or warnings (useful with record)"),
	OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
		     "Only enable events on applying cpu with this type "
		     "for hybrid platform (e.g. core or atom)",
		     parse_cputype),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
		     "libpfm4 event selector. use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
		     "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
		     "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
		     parse_control_option),
	OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
			    "measure I/O performance metrics provided by arch/platform",
			    iostat_parse),
	OPT_END()
};

/**
 * Calculate the cache instance ID from the map in
 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
 * Cache instance ID is the first CPU reported in the shared_cpu_list file.
 */
static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
{
	int id;
	struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);

	/*
	 * If the map contains no CPU, consider the current CPU to
	 * be the first online CPU in the cache domain else use the
	 * first online CPU of the cache domain as the ID.
	 */
	if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map))
		id = cpu.cpu;
	else
		id = perf_cpu_map__cpu(cpu_map, 0).cpu;

	/* Free the perf_cpu_map used to find the cache ID */
	perf_cpu_map__put(cpu_map);

	return id;
}

/**
 * cpu__get_cache_details - Returns 0 if successful in populating the
 * cache level and cache id. Cache level is read from
 * /sys/devices/system/cpu/cpuX/cache/indexY/level, whereas the cache
 * instance ID is the first CPU reported by
 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
 */
static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache)
{
	int ret = 0;
	u32 cache_level = stat_config.aggr_level;
	struct cpu_cache_level caches[MAX_CACHE_LVL];
	u32 i = 0, caches_cnt = 0;

	cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
	cache->cache = -1;

	ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);
	if (ret) {
		/*
		 * If caches_cnt is not 0, cpu_cache_level data
		 * was allocated when building the topology.
		 * Free the allocated data before returning.
		 */
		if (caches_cnt)
			goto free_caches;

		return ret;
	}

	if (!caches_cnt)
		return -1;

	/*
	 * Save the data for the highest level if no
	 * level was specified by the user.
	 */
	if (cache_level > MAX_CACHE_LVL) {
		int max_level_index = 0;

		for (i = 1; i < caches_cnt; ++i) {
			if (caches[i].level > caches[max_level_index].level)
				max_level_index = i;
		}

		cache->cache_lvl = caches[max_level_index].level;
		cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);

		/* Reset i to 0 to free entire caches[] */
		i = 0;
		goto free_caches;
	}

	for (i = 0; i < caches_cnt; ++i) {
		if (caches[i].level == cache_level) {
			cache->cache_lvl = cache_level;
			cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
		}

		cpu_cache_level__free(&caches[i]);
	}

free_caches:
	/*
	 * Free all the allocated cpu_cache_level data.
	 */
	while (i < caches_cnt)
		cpu_cache_level__free(&caches[i++]);

	return ret;
}

/**
 * aggr_cpu_id__cache - Create an aggr_cpu_id with cache instance ID, cache
 * level, die and socket populated with the cache instance ID, cache level,
 * die and socket for cpu. The function signature is compatible with
 * aggr_cpu_id_get_t.
 */
static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data)
{
	int ret;
	struct aggr_cpu_id id;
	struct perf_cache cache;

	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	ret = cpu__get_cache_details(cpu, &cache);
	if (ret)
		return id;

	id.cache_lvl = cache.cache_lvl;
	id.cache = cache.cache;
	return id;
}

static const char *const aggr_mode__string[] = {
	[AGGR_CORE] = "core",
	[AGGR_CACHE] = "cache",
	[AGGR_CLUSTER] = "cluster",
	[AGGR_DIE] = "die",
	[AGGR_GLOBAL] = "global",
	[AGGR_NODE] = "node",
	[AGGR_NONE] = "none",
	[AGGR_SOCKET] = "socket",
	[AGGR_THREAD] = "thread",
	[AGGR_UNSET] = "unset",
};

static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__die(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return aggr_cpu_id__cache(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cluster(struct perf_stat_config *config __maybe_unused,
						 struct perf_cpu cpu)
{
	return aggr_cpu_id__cluster(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__core(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__node(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__global(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
					      aggr_get_id_t get_id, struct perf_cpu cpu)
{
	struct aggr_cpu_id id;

	/* per-process mode - should use global aggr mode */
	if (cpu.cpu == -1)
		return get_id(config, cpu);

	if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
		config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);

	id = config->cpus_aggr_map->map[cpu.cpu];
	return id;
}

static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}

static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}

static struct aggr_cpu_id perf_stat__get_cluster_cached(struct perf_stat_config *config,
							struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cluster, cpu);
}

static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config,
							 struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu);
}

static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}

static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}

static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
}

static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}

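/* Map an aggregation mode to the aggr_cpu_id constructor used to build keys. */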
1555 | static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) |
1556 | { |
1557 | switch (aggr_mode) { |
1558 | case AGGR_SOCKET: |
1559 | return aggr_cpu_id__socket; |
1560 | case AGGR_DIE: |
1561 | return aggr_cpu_id__die; |
1562 | case AGGR_CLUSTER: |
1563 | return aggr_cpu_id__cluster; |
1564 | case AGGR_CACHE: |
1565 | return aggr_cpu_id__cache; |
1566 | case AGGR_CORE: |
1567 | return aggr_cpu_id__core; |
1568 | case AGGR_NODE: |
1569 | return aggr_cpu_id__node; |
1570 | case AGGR_NONE: |
1571 | return aggr_cpu_id__cpu; |
1572 | case AGGR_GLOBAL: |
1573 | return aggr_cpu_id__global; |
1574 | case AGGR_THREAD: |
1575 | case AGGR_UNSET: |
1576 | case AGGR_MAX: |
1577 | default: |
1578 | return NULL; |
1579 | } |
1580 | } |
1581 | |
1582 | static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) |
1583 | { |
1584 | switch (aggr_mode) { |
1585 | case AGGR_SOCKET: |
1586 | return perf_stat__get_socket_cached; |
1587 | case AGGR_DIE: |
1588 | return perf_stat__get_die_cached; |
1589 | case AGGR_CLUSTER: |
1590 | return perf_stat__get_cluster_cached; |
1591 | case AGGR_CACHE: |
1592 | return perf_stat__get_cache_id_cached; |
1593 | case AGGR_CORE: |
1594 | return perf_stat__get_core_cached; |
1595 | case AGGR_NODE: |
1596 | return perf_stat__get_node_cached; |
1597 | case AGGR_NONE: |
1598 | return perf_stat__get_cpu_cached; |
1599 | case AGGR_GLOBAL: |
1600 | return perf_stat__get_global_cached; |
1601 | case AGGR_THREAD: |
1602 | case AGGR_UNSET: |
1603 | case AGGR_MAX: |
1604 | default: |
1605 | return NULL; |
1606 | } |
1607 | } |
1608 | |
1609 | static int perf_stat_init_aggr_mode(void) |
1610 | { |
1611 | int nr; |
1612 | aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(aggr_mode: stat_config.aggr_mode); |
1613 | |
1614 | if (get_id) { |
1615 | bool needs_sort = stat_config.aggr_mode != AGGR_NONE; |
1616 | stat_config.aggr_map = cpu_aggr_map__new(cpus: evsel_list->core.user_requested_cpus, |
1617 | get_id, /*data=*/NULL, needs_sort); |
1618 | if (!stat_config.aggr_map) { |
1619 | pr_err("cannot build %s map\n" , aggr_mode__string[stat_config.aggr_mode]); |
1620 | return -1; |
1621 | } |
1622 | stat_config.aggr_get_id = aggr_mode__get_id(aggr_mode: stat_config.aggr_mode); |
1623 | } |
1624 | |
1625 | if (stat_config.aggr_mode == AGGR_THREAD) { |
1626 | nr = perf_thread_map__nr(evsel_list->core.threads); |
1627 | stat_config.aggr_map = cpu_aggr_map__empty_new(nr); |
1628 | if (stat_config.aggr_map == NULL) |
1629 | return -ENOMEM; |
1630 | |
1631 | for (int s = 0; s < nr; s++) { |
1632 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1633 | |
1634 | id.thread_idx = s; |
1635 | stat_config.aggr_map->map[s] = id; |
1636 | } |
1637 | return 0; |
1638 | } |
1639 | |
1640 | /* |
1641 | * The evsel_list->cpus is the base we operate on, |
1642 | * taking the highest cpu number to be the size of |
1643 | * the aggregation translate cpumap. |
1644 | */ |
1645 | if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus)) |
1646 | nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; |
1647 | else |
1648 | nr = 0; |
	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
1650 | return stat_config.cpus_aggr_map ? 0 : -ENOMEM; |
1651 | } |
1652 | |
1653 | static void cpu_aggr_map__delete(struct cpu_aggr_map *map) |
1654 | { |
1655 | if (map) { |
1656 | WARN_ONCE(refcount_read(&map->refcnt) != 0, |
1657 | "cpu_aggr_map refcnt unbalanced\n" ); |
1658 | free(map); |
1659 | } |
1660 | } |
1661 | |
1662 | static void cpu_aggr_map__put(struct cpu_aggr_map *map) |
1663 | { |
	if (map && refcount_dec_and_test(&map->refcnt))
1665 | cpu_aggr_map__delete(map); |
1666 | } |
1667 | |
1668 | static void perf_stat__exit_aggr_mode(void) |
1669 | { |
	cpu_aggr_map__put(stat_config.aggr_map);
	cpu_aggr_map__put(stat_config.cpus_aggr_map);
1672 | stat_config.aggr_map = NULL; |
1673 | stat_config.cpus_aggr_map = NULL; |
1674 | } |
1675 | |
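/*
 * The perf_env__get_*_aggr_by_cpu() variants below serve 'perf stat
 * report': they read the topology recorded in the perf.data header
 * (struct perf_env) instead of querying the live system.
 */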
1676 | static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) |
1677 | { |
1678 | struct perf_env *env = data; |
1679 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1680 | |
1681 | if (cpu.cpu != -1) |
1682 | id.socket = env->cpu[cpu.cpu].socket_id; |
1683 | |
1684 | return id; |
1685 | } |
1686 | |
1687 | static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) |
1688 | { |
1689 | struct perf_env *env = data; |
1690 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1691 | |
1692 | if (cpu.cpu != -1) { |
1693 | /* |
1694 | * die_id is relative to socket, so start |
1695 | * with the socket ID and then add die to |
1696 | * make a unique ID. |
1697 | */ |
1698 | id.socket = env->cpu[cpu.cpu].socket_id; |
1699 | id.die = env->cpu[cpu.cpu].die_id; |
1700 | } |
1701 | |
1702 | return id; |
1703 | } |
1704 | |
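/*
 * Resolve the cache instance that @cpu belongs to at @cache_level using
 * the cache topology recorded in @env. If no matching cache is found,
 * id->cache is left at -1.
 */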
1705 | static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env, |
1706 | u32 cache_level, struct aggr_cpu_id *id) |
1707 | { |
1708 | int i; |
1709 | int caches_cnt = env->caches_cnt; |
1710 | struct cpu_cache_level *caches = env->caches; |
1711 | |
1712 | id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; |
1713 | id->cache = -1; |
1714 | |
1715 | if (!caches_cnt) |
1716 | return; |
1717 | |
1718 | for (i = caches_cnt - 1; i > -1; --i) { |
1719 | struct perf_cpu_map *cpu_map; |
1720 | int map_contains_cpu; |
1721 | |
1722 | /* |
		 * If user has not specified a level, find the first level with
1724 | * the cpu in the map. Since building the map is expensive, do |
1725 | * this only if levels match. |
1726 | */ |
1727 | if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level) |
1728 | continue; |
1729 | |
1730 | cpu_map = perf_cpu_map__new(caches[i].map); |
1731 | map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu); |
1732 | perf_cpu_map__put(cpu_map); |
1733 | |
1734 | if (map_contains_cpu != -1) { |
1735 | id->cache_lvl = caches[i].level; |
			id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
1737 | return; |
1738 | } |
1739 | } |
1740 | } |
1741 | |
1742 | static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu, |
1743 | void *data) |
1744 | { |
1745 | struct perf_env *env = data; |
1746 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1747 | |
1748 | if (cpu.cpu != -1) { |
1749 | u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level; |
1750 | |
1751 | id.socket = env->cpu[cpu.cpu].socket_id; |
1752 | id.die = env->cpu[cpu.cpu].die_id; |
		perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id);
1754 | } |
1755 | |
1756 | return id; |
1757 | } |
1758 | |
1759 | static struct aggr_cpu_id perf_env__get_cluster_aggr_by_cpu(struct perf_cpu cpu, |
1760 | void *data) |
1761 | { |
1762 | struct perf_env *env = data; |
1763 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1764 | |
1765 | if (cpu.cpu != -1) { |
1766 | id.socket = env->cpu[cpu.cpu].socket_id; |
1767 | id.die = env->cpu[cpu.cpu].die_id; |
1768 | id.cluster = env->cpu[cpu.cpu].cluster_id; |
1769 | } |
1770 | |
1771 | return id; |
1772 | } |
1773 | |
1774 | static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) |
1775 | { |
1776 | struct perf_env *env = data; |
1777 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1778 | |
1779 | if (cpu.cpu != -1) { |
1780 | /* |
1781 | * core_id is relative to socket, die and cluster, we need a |
1782 | * global id. So we set socket, die id, cluster id and core id. |
1783 | */ |
1784 | id.socket = env->cpu[cpu.cpu].socket_id; |
1785 | id.die = env->cpu[cpu.cpu].die_id; |
1786 | id.cluster = env->cpu[cpu.cpu].cluster_id; |
1787 | id.core = env->cpu[cpu.cpu].core_id; |
1788 | } |
1789 | |
1790 | return id; |
1791 | } |
1792 | |
1793 | static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data) |
1794 | { |
1795 | struct perf_env *env = data; |
1796 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1797 | |
1798 | if (cpu.cpu != -1) { |
1799 | /* |
1800 | * core_id is relative to socket and die, |
1801 | * we need a global id. So we set |
1802 | * socket, die id and core id |
1803 | */ |
1804 | id.socket = env->cpu[cpu.cpu].socket_id; |
1805 | id.die = env->cpu[cpu.cpu].die_id; |
1806 | id.core = env->cpu[cpu.cpu].core_id; |
1807 | id.cpu = cpu; |
1808 | } |
1809 | |
1810 | return id; |
1811 | } |
1812 | |
1813 | static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) |
1814 | { |
1815 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1816 | |
	id.node = perf_env__numa_node(data, cpu);
1818 | return id; |
1819 | } |
1820 | |
1821 | static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused, |
1822 | void *data __maybe_unused) |
1823 | { |
1824 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1825 | |
	/* it always aggregates to cpu 0 */
1827 | id.cpu = (struct perf_cpu){ .cpu = 0 }; |
1828 | return id; |
1829 | } |
1830 | |
1831 | static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, |
1832 | struct perf_cpu cpu) |
1833 | { |
	return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
1837 | struct perf_cpu cpu) |
1838 | { |
	return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1840 | } |
1841 | |
1842 | static struct aggr_cpu_id perf_stat__get_cluster_file(struct perf_stat_config *config __maybe_unused, |
1843 | struct perf_cpu cpu) |
1844 | { |
	return perf_env__get_cluster_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1846 | } |
1847 | |
1848 | static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused, |
1849 | struct perf_cpu cpu) |
1850 | { |
	return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1852 | } |
1853 | |
1854 | static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, |
1855 | struct perf_cpu cpu) |
1856 | { |
	return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1858 | } |
1859 | |
1860 | static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused, |
1861 | struct perf_cpu cpu) |
1862 | { |
	return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1864 | } |
1865 | |
1866 | static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, |
1867 | struct perf_cpu cpu) |
1868 | { |
	return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1870 | } |
1871 | |
1872 | static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused, |
1873 | struct perf_cpu cpu) |
1874 | { |
	return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1876 | } |
1877 | |
1878 | static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) |
1879 | { |
1880 | switch (aggr_mode) { |
1881 | case AGGR_SOCKET: |
1882 | return perf_env__get_socket_aggr_by_cpu; |
1883 | case AGGR_DIE: |
1884 | return perf_env__get_die_aggr_by_cpu; |
1885 | case AGGR_CLUSTER: |
1886 | return perf_env__get_cluster_aggr_by_cpu; |
1887 | case AGGR_CACHE: |
1888 | return perf_env__get_cache_aggr_by_cpu; |
1889 | case AGGR_CORE: |
1890 | return perf_env__get_core_aggr_by_cpu; |
1891 | case AGGR_NODE: |
1892 | return perf_env__get_node_aggr_by_cpu; |
1893 | case AGGR_GLOBAL: |
1894 | return perf_env__get_global_aggr_by_cpu; |
1895 | case AGGR_NONE: |
1896 | return perf_env__get_cpu_aggr_by_cpu; |
1897 | case AGGR_THREAD: |
1898 | case AGGR_UNSET: |
1899 | case AGGR_MAX: |
1900 | default: |
1901 | return NULL; |
1902 | } |
1903 | } |
1904 | |
1905 | static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) |
1906 | { |
1907 | switch (aggr_mode) { |
1908 | case AGGR_SOCKET: |
1909 | return perf_stat__get_socket_file; |
1910 | case AGGR_DIE: |
1911 | return perf_stat__get_die_file; |
1912 | case AGGR_CLUSTER: |
1913 | return perf_stat__get_cluster_file; |
1914 | case AGGR_CACHE: |
1915 | return perf_stat__get_cache_file; |
1916 | case AGGR_CORE: |
1917 | return perf_stat__get_core_file; |
1918 | case AGGR_NODE: |
1919 | return perf_stat__get_node_file; |
1920 | case AGGR_GLOBAL: |
1921 | return perf_stat__get_global_file; |
1922 | case AGGR_NONE: |
1923 | return perf_stat__get_cpu_file; |
1924 | case AGGR_THREAD: |
1925 | case AGGR_UNSET: |
1926 | case AGGR_MAX: |
1927 | default: |
1928 | return NULL; |
1929 | } |
1930 | } |
1931 | |
1932 | static int perf_stat_init_aggr_mode_file(struct perf_stat *st) |
1933 | { |
1934 | struct perf_env *env = &st->session->header.env; |
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
1936 | bool needs_sort = stat_config.aggr_mode != AGGR_NONE; |
1937 | |
1938 | if (stat_config.aggr_mode == AGGR_THREAD) { |
1939 | int nr = perf_thread_map__nr(evsel_list->core.threads); |
1940 | |
1941 | stat_config.aggr_map = cpu_aggr_map__empty_new(nr); |
1942 | if (stat_config.aggr_map == NULL) |
1943 | return -ENOMEM; |
1944 | |
1945 | for (int s = 0; s < nr; s++) { |
1946 | struct aggr_cpu_id id = aggr_cpu_id__empty(); |
1947 | |
1948 | id.thread_idx = s; |
1949 | stat_config.aggr_map->map[s] = id; |
1950 | } |
1951 | return 0; |
1952 | } |
1953 | |
1954 | if (!get_id) |
1955 | return 0; |
1956 | |
	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
						 get_id, env, needs_sort);
	if (!stat_config.aggr_map) {
		pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
		return -1;
	}
	stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
1964 | return 0; |
1965 | } |
1966 | |
1967 | /* |
1968 | * Add default attributes, if there were no attributes specified or |
1969 | * if -d/--detailed, -d -d or -d -d -d is used: |
1970 | */ |
1971 | static int add_default_attributes(void) |
1972 | { |
1973 | struct perf_event_attr default_attrs0[] = { |
1974 | |
1975 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, |
1976 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, |
1977 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, |
1978 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, |
1979 | |
1980 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, |
1981 | }; |
1982 | struct perf_event_attr frontend_attrs[] = { |
1983 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, |
1984 | }; |
1985 | struct perf_event_attr backend_attrs[] = { |
1986 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, |
1987 | }; |
1988 | struct perf_event_attr default_attrs1[] = { |
1989 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, |
1990 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, |
1991 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, |
1992 | |
1993 | }; |
1994 | |
1995 | /* |
1996 | * Detailed stats (-d), covering the L1 and last level data caches: |
1997 | */ |
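	/*
	 * PERF_TYPE_HW_CACHE events pack the cache, the operation and the
	 * result into one config word:
	 *
	 *   config = cache_id | (op_id << 8) | (result_id << 16)
	 *
	 * e.g. L1D read misses below are PERF_COUNT_HW_CACHE_L1D |
	 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
	 */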
1998 | struct perf_event_attr detailed_attrs[] = { |
1999 | |
2000 | { .type = PERF_TYPE_HW_CACHE, |
2001 | .config = |
2002 | PERF_COUNT_HW_CACHE_L1D << 0 | |
2003 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2004 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, |
2005 | |
2006 | { .type = PERF_TYPE_HW_CACHE, |
2007 | .config = |
2008 | PERF_COUNT_HW_CACHE_L1D << 0 | |
2009 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2010 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, |
2011 | |
2012 | { .type = PERF_TYPE_HW_CACHE, |
2013 | .config = |
2014 | PERF_COUNT_HW_CACHE_LL << 0 | |
2015 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2016 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, |
2017 | |
2018 | { .type = PERF_TYPE_HW_CACHE, |
2019 | .config = |
2020 | PERF_COUNT_HW_CACHE_LL << 0 | |
2021 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2022 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, |
2023 | }; |
2024 | |
2025 | /* |
2026 | * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: |
2027 | */ |
2028 | struct perf_event_attr very_detailed_attrs[] = { |
2029 | |
2030 | { .type = PERF_TYPE_HW_CACHE, |
2031 | .config = |
2032 | PERF_COUNT_HW_CACHE_L1I << 0 | |
2033 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2034 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, |
2035 | |
2036 | { .type = PERF_TYPE_HW_CACHE, |
2037 | .config = |
2038 | PERF_COUNT_HW_CACHE_L1I << 0 | |
2039 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2040 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, |
2041 | |
2042 | { .type = PERF_TYPE_HW_CACHE, |
2043 | .config = |
2044 | PERF_COUNT_HW_CACHE_DTLB << 0 | |
2045 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2046 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, |
2047 | |
2048 | { .type = PERF_TYPE_HW_CACHE, |
2049 | .config = |
2050 | PERF_COUNT_HW_CACHE_DTLB << 0 | |
2051 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2052 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, |
2053 | |
2054 | { .type = PERF_TYPE_HW_CACHE, |
2055 | .config = |
2056 | PERF_COUNT_HW_CACHE_ITLB << 0 | |
2057 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2058 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, |
2059 | |
2060 | { .type = PERF_TYPE_HW_CACHE, |
2061 | .config = |
2062 | PERF_COUNT_HW_CACHE_ITLB << 0 | |
2063 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | |
2064 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, |
2065 | |
2066 | }; |
2067 | |
2068 | /* |
2069 | * Very, very detailed stats (-d -d -d), adding prefetch events: |
2070 | */ |
2071 | struct perf_event_attr very_very_detailed_attrs[] = { |
2072 | |
2073 | { .type = PERF_TYPE_HW_CACHE, |
2074 | .config = |
2075 | PERF_COUNT_HW_CACHE_L1D << 0 | |
2076 | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | |
2077 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, |
2078 | |
2079 | { .type = PERF_TYPE_HW_CACHE, |
2080 | .config = |
2081 | PERF_COUNT_HW_CACHE_L1D << 0 | |
2082 | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | |
2083 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, |
2084 | }; |
2085 | |
2086 | struct perf_event_attr default_null_attrs[] = {}; |
	const char *pmu = parse_events_option_args.pmu_filter ?: "all";
2088 | |
2089 | /* Set attrs if no event is selected and !null_run: */ |
2090 | if (stat_config.null_run) |
2091 | return 0; |
2092 | |
2093 | if (transaction_run) { |
		/*
		 * Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture, test for such a metric name.
		 */
2099 | if (!metricgroup__has_metric(pmu, metric: "transaction" )) { |
2100 | pr_err("Missing transaction metrics\n" ); |
2101 | return -1; |
2102 | } |
2103 | return metricgroup__parse_groups(perf_evlist: evsel_list, pmu, str: "transaction" , |
2104 | metric_no_group: stat_config.metric_no_group, |
2105 | metric_no_merge: stat_config.metric_no_merge, |
2106 | metric_no_threshold: stat_config.metric_no_threshold, |
2107 | user_requested_cpu_list: stat_config.user_requested_cpu_list, |
2108 | system_wide: stat_config.system_wide, |
2109 | metric_events: &stat_config.metric_events); |
2110 | } |
2111 | |
2112 | if (smi_cost) { |
2113 | int smi; |
2114 | |
2115 | if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { |
2116 | pr_err("freeze_on_smi is not supported.\n" ); |
2117 | return -1; |
2118 | } |
2119 | |
2120 | if (!smi) { |
2121 | if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { |
2122 | fprintf(stderr, "Failed to set freeze_on_smi.\n" ); |
2123 | return -1; |
2124 | } |
2125 | smi_reset = true; |
2126 | } |
2127 | |
2128 | if (!metricgroup__has_metric(pmu, metric: "smi" )) { |
2129 | pr_err("Missing smi metrics\n" ); |
2130 | return -1; |
2131 | } |
2132 | |
2133 | if (!force_metric_only) |
2134 | stat_config.metric_only = true; |
2135 | |
2136 | return metricgroup__parse_groups(perf_evlist: evsel_list, pmu, str: "smi" , |
2137 | metric_no_group: stat_config.metric_no_group, |
2138 | metric_no_merge: stat_config.metric_no_merge, |
2139 | metric_no_threshold: stat_config.metric_no_threshold, |
2140 | user_requested_cpu_list: stat_config.user_requested_cpu_list, |
2141 | system_wide: stat_config.system_wide, |
2142 | metric_events: &stat_config.metric_events); |
2143 | } |
2144 | |
2145 | if (topdown_run) { |
2146 | unsigned int max_level = metricgroups__topdown_max_level(); |
2147 | char str[] = "TopdownL1" ; |
2148 | |
2149 | if (!force_metric_only) |
2150 | stat_config.metric_only = true; |
2151 | |
2152 | if (!max_level) { |
2153 | pr_err("Topdown requested but the topdown metric groups aren't present.\n" |
2154 | "(See perf list the metric groups have names like TopdownL1)\n" ); |
2155 | return -1; |
2156 | } |
2157 | if (stat_config.topdown_level > max_level) { |
2158 | pr_err("Invalid top-down metrics level. The max level is %u.\n" , max_level); |
2159 | return -1; |
2160 | } else if (!stat_config.topdown_level) |
2161 | stat_config.topdown_level = 1; |
2162 | |
2163 | if (!stat_config.interval && !stat_config.metric_only) { |
2164 | fprintf(stat_config.output, |
2165 | "Topdown accuracy may decrease when measuring long periods.\n" |
2166 | "Please print the result regularly, e.g. -I1000\n" ); |
2167 | } |
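		/* Overwrite the trailing '1' of "TopdownL1" with the requested level. */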
2168 | str[8] = stat_config.topdown_level + '0'; |
		if (metricgroup__parse_groups(evsel_list,
					      pmu, str,
					      /*metric_no_group=*/false,
					      /*metric_no_merge=*/false,
					      /*metric_no_threshold=*/true,
					      stat_config.user_requested_cpu_list,
					      stat_config.system_wide,
					      &stat_config.metric_events) < 0)
2177 | return -1; |
2178 | } |
2179 | |
2180 | if (!stat_config.topdown_level) |
2181 | stat_config.topdown_level = 1; |
2182 | |
2183 | if (!evsel_list->core.nr_entries) { |
2184 | /* No events so add defaults. */ |
		if (target__has_cpu(&target))
2186 | default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; |
2187 | |
2188 | if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) |
2189 | return -1; |
2190 | if (perf_pmus__have_event(pname: "cpu" , name: "stalled-cycles-frontend" )) { |
2191 | if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) |
2192 | return -1; |
2193 | } |
2194 | if (perf_pmus__have_event(pname: "cpu" , name: "stalled-cycles-backend" )) { |
2195 | if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) |
2196 | return -1; |
2197 | } |
2198 | if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) |
2199 | return -1; |
2200 | /* |
2201 | * Add TopdownL1 metrics if they exist. To minimize |
2202 | * multiplexing, don't request threshold computation. |
2203 | */ |
2204 | if (metricgroup__has_metric(pmu, metric: "Default" )) { |
2205 | struct evlist *metric_evlist = evlist__new(); |
2206 | struct evsel *metric_evsel; |
2207 | |
2208 | if (!metric_evlist) |
2209 | return -1; |
2210 | |
			if (metricgroup__parse_groups(metric_evlist, pmu, "Default",
						      /*metric_no_group=*/false,
						      /*metric_no_merge=*/false,
						      /*metric_no_threshold=*/true,
						      stat_config.user_requested_cpu_list,
						      stat_config.system_wide,
						      &stat_config.metric_events) < 0)
2218 | return -1; |
2219 | |
2220 | evlist__for_each_entry(metric_evlist, metric_evsel) { |
2221 | metric_evsel->skippable = true; |
2222 | metric_evsel->default_metricgroup = true; |
2223 | } |
			evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
			evlist__delete(metric_evlist);
2226 | } |
2227 | |
2228 | /* Platform specific attrs */ |
2229 | if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) |
2230 | return -1; |
2231 | } |
2232 | |
2233 | /* Detailed events get appended to the event list: */ |
2234 | |
2235 | if (detailed_run < 1) |
2236 | return 0; |
2237 | |
2238 | /* Append detailed run extra attributes: */ |
2239 | if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) |
2240 | return -1; |
2241 | |
2242 | if (detailed_run < 2) |
2243 | return 0; |
2244 | |
2245 | /* Append very detailed run extra attributes: */ |
2246 | if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) |
2247 | return -1; |
2248 | |
2249 | if (detailed_run < 3) |
2250 | return 0; |
2251 | |
2252 | /* Append very, very detailed run extra attributes: */ |
2253 | return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); |
2254 | } |
2255 | |
2256 | static const char * const stat_record_usage[] = { |
2257 | "perf stat record [<options>]" , |
2258 | NULL, |
2259 | }; |
2260 | |
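/*
 * Start with every perf.data header feature enabled, then drop the ones
 * that carry no useful information for a counting (non-sampling) session.
 */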
2261 | static void init_features(struct perf_session *session) |
2262 | { |
2263 | int feat; |
2264 | |
2265 | for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) |
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
	perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
	perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
	perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
2273 | } |
2274 | |
2275 | static int __cmd_record(int argc, const char **argv) |
2276 | { |
2277 | struct perf_session *session; |
2278 | struct perf_data *data = &perf_stat.data; |
2279 | |
2280 | argc = parse_options(argc, argv, stat_options, stat_record_usage, |
2281 | PARSE_OPT_STOP_AT_NON_OPTION); |
2282 | |
2283 | if (output_name) |
2284 | data->path = output_name; |
2285 | |
2286 | if (stat_config.run_count != 1 || forever) { |
2287 | pr_err("Cannot use -r option with perf stat record.\n" ); |
2288 | return -1; |
2289 | } |
2290 | |
2291 | session = perf_session__new(data, NULL); |
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed\n");
		return PTR_ERR(session);
2295 | } |
2296 | |
2297 | init_features(session); |
2298 | |
2299 | session->evlist = evsel_list; |
2300 | perf_stat.session = session; |
2301 | perf_stat.record = true; |
2302 | return argc; |
2303 | } |
2304 | |
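/*
 * A stat round marks the end of one measurement interval in a recorded
 * session: fold in the counters and, for interval mode, reconstruct the
 * timestamp that print_counters() expects.
 */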
2305 | static int process_stat_round_event(struct perf_session *session, |
2306 | union perf_event *event) |
2307 | { |
2308 | struct perf_record_stat_round *stat_round = &event->stat_round; |
2309 | struct timespec tsh, *ts = NULL; |
2310 | const char **argv = session->header.env.cmdline_argv; |
2311 | int argc = session->header.env.nr_cmdline; |
2312 | |
2313 | process_counters(); |
2314 | |
2315 | if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) |
		update_stats(&walltime_nsecs_stats, stat_round->time);
2317 | |
2318 | if (stat_config.interval && stat_round->time) { |
2319 | tsh.tv_sec = stat_round->time / NSEC_PER_SEC; |
2320 | tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; |
2321 | ts = &tsh; |
2322 | } |
2323 | |
2324 | print_counters(ts, argc, argv); |
2325 | return 0; |
2326 | } |
2327 | |
2328 | static |
2329 | int process_stat_config_event(struct perf_session *session, |
2330 | union perf_event *event) |
2331 | { |
2332 | struct perf_tool *tool = session->tool; |
2333 | struct perf_stat *st = container_of(tool, struct perf_stat, tool); |
2334 | |
	perf_event__read_stat_config(&stat_config, &event->stat_config);
2336 | |
2337 | if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) { |
2338 | if (st->aggr_mode != AGGR_UNSET) |
2339 | pr_warning("warning: processing task data, aggregation mode not set\n" ); |
2340 | } else if (st->aggr_mode != AGGR_UNSET) { |
2341 | stat_config.aggr_mode = st->aggr_mode; |
2342 | } |
2343 | |
2344 | if (perf_stat.data.is_pipe) |
2345 | perf_stat_init_aggr_mode(); |
2346 | else |
2347 | perf_stat_init_aggr_mode_file(st); |
2348 | |
2349 | if (stat_config.aggr_map) { |
2350 | int nr_aggr = stat_config.aggr_map->nr; |
2351 | |
		if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
			pr_err("cannot allocate aggr counts\n");
2354 | return -1; |
2355 | } |
2356 | } |
2357 | return 0; |
2358 | } |
2359 | |
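/*
 * Both a CPU map and a thread map event must arrive before the stats can
 * be allocated; whichever event comes second completes the setup here.
 */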
2360 | static int set_maps(struct perf_stat *st) |
2361 | { |
2362 | if (!st->cpus || !st->threads) |
2363 | return 0; |
2364 | |
	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
2366 | return -EINVAL; |
2367 | |
2368 | perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); |
2369 | |
	if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
2371 | return -ENOMEM; |
2372 | |
2373 | st->maps_allocated = true; |
2374 | return 0; |
2375 | } |
2376 | |
2377 | static |
2378 | int process_thread_map_event(struct perf_session *session, |
2379 | union perf_event *event) |
2380 | { |
2381 | struct perf_tool *tool = session->tool; |
2382 | struct perf_stat *st = container_of(tool, struct perf_stat, tool); |
2383 | |
2384 | if (st->threads) { |
2385 | pr_warning("Extra thread map event, ignoring.\n" ); |
2386 | return 0; |
2387 | } |
2388 | |
	st->threads = thread_map__new_event(&event->thread_map);
2390 | if (!st->threads) |
2391 | return -ENOMEM; |
2392 | |
2393 | return set_maps(st); |
2394 | } |
2395 | |
2396 | static |
2397 | int process_cpu_map_event(struct perf_session *session, |
2398 | union perf_event *event) |
2399 | { |
2400 | struct perf_tool *tool = session->tool; |
2401 | struct perf_stat *st = container_of(tool, struct perf_stat, tool); |
2402 | struct perf_cpu_map *cpus; |
2403 | |
2404 | if (st->cpus) { |
2405 | pr_warning("Extra cpu map event, ignoring.\n" ); |
2406 | return 0; |
2407 | } |
2408 | |
	cpus = cpu_map__new_data(&event->cpu_map.data);
2410 | if (!cpus) |
2411 | return -ENOMEM; |
2412 | |
2413 | st->cpus = cpus; |
2414 | return set_maps(st); |
2415 | } |
2416 | |
2417 | static const char * const stat_report_usage[] = { |
2418 | "perf stat report [<options>]" , |
2419 | NULL, |
2420 | }; |
2421 | |
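/* Tool callbacks used when replaying a recorded stat session. */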
2422 | static struct perf_stat perf_stat = { |
2423 | .tool = { |
2424 | .attr = perf_event__process_attr, |
2425 | .event_update = perf_event__process_event_update, |
2426 | .thread_map = process_thread_map_event, |
2427 | .cpu_map = process_cpu_map_event, |
2428 | .stat_config = process_stat_config_event, |
2429 | .stat = perf_event__process_stat_event, |
2430 | .stat_round = process_stat_round_event, |
2431 | }, |
2432 | .aggr_mode = AGGR_UNSET, |
2433 | .aggr_level = 0, |
2434 | }; |
2435 | |
2436 | static int __cmd_report(int argc, const char **argv) |
2437 | { |
2438 | struct perf_session *session; |
2439 | const struct option options[] = { |
2440 | OPT_STRING('i', "input" , &input_name, "file" , "input file name" ), |
2441 | OPT_SET_UINT(0, "per-socket" , &perf_stat.aggr_mode, |
2442 | "aggregate counts per processor socket" , AGGR_SOCKET), |
2443 | OPT_SET_UINT(0, "per-die" , &perf_stat.aggr_mode, |
2444 | "aggregate counts per processor die" , AGGR_DIE), |
2445 | OPT_SET_UINT(0, "per-cluster" , &perf_stat.aggr_mode, |
2446 | "aggregate counts perf processor cluster" , AGGR_CLUSTER), |
2447 | OPT_CALLBACK_OPTARG(0, "per-cache" , &perf_stat.aggr_mode, &perf_stat.aggr_level, |
2448 | "cache level" , |
2449 | "aggregate count at this cache level (Default: LLC)" , |
2450 | parse_cache_level), |
2451 | OPT_SET_UINT(0, "per-core" , &perf_stat.aggr_mode, |
2452 | "aggregate counts per physical processor core" , AGGR_CORE), |
2453 | OPT_SET_UINT(0, "per-node" , &perf_stat.aggr_mode, |
2454 | "aggregate counts per numa node" , AGGR_NODE), |
2455 | OPT_SET_UINT('A', "no-aggr" , &perf_stat.aggr_mode, |
2456 | "disable CPU count aggregation" , AGGR_NONE), |
2457 | OPT_END() |
2458 | }; |
2459 | struct stat st; |
2460 | int ret; |
2461 | |
2462 | argc = parse_options(argc, argv, options, stat_report_usage, 0); |
2463 | |
2464 | if (!input_name || !strlen(input_name)) { |
2465 | if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) |
2466 | input_name = "-" ; |
2467 | else |
2468 | input_name = "perf.data" ; |
2469 | } |
2470 | |
2471 | perf_stat.data.path = input_name; |
2472 | perf_stat.data.mode = PERF_DATA_MODE_READ; |
2473 | |
	session = perf_session__new(&perf_stat.data, &perf_stat.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);
2477 | |
2478 | perf_stat.session = session; |
2479 | stat_config.output = stderr; |
	evlist__delete(evsel_list);
2481 | evsel_list = session->evlist; |
2482 | |
2483 | ret = perf_session__process_events(session); |
2484 | if (ret) |
2485 | return ret; |
2486 | |
2487 | perf_session__delete(session); |
2488 | return 0; |
2489 | } |
2490 | |
2491 | static void setup_system_wide(int forks) |
2492 | { |
2493 | /* |
2494 | * Make system wide (-a) the default target if |
2495 | * no target was specified and one of following |
2496 | * conditions is met: |
2497 | * |
2498 | * - there's no workload specified |
2499 | * - there is workload specified but all requested |
2500 | * events are system wide events |
2501 | */ |
	if (!target__none(&target))
2503 | return; |
2504 | |
2505 | if (!forks) |
2506 | target.system_wide = true; |
2507 | else { |
2508 | struct evsel *counter; |
2509 | |
2510 | evlist__for_each_entry(evsel_list, counter) { |
2511 | if (!counter->core.requires_cpu && |
			    !evsel__name_is(counter, "duration_time")) {
2513 | return; |
2514 | } |
2515 | } |
2516 | |
2517 | if (evsel_list->core.nr_entries) |
2518 | target.system_wide = true; |
2519 | } |
2520 | } |
2521 | |
2522 | int cmd_stat(int argc, const char **argv) |
2523 | { |
2524 | const char * const stat_usage[] = { |
2525 | "perf stat [<options>] [<command>]" , |
2526 | NULL |
2527 | }; |
2528 | int status = -EINVAL, run_idx, err; |
2529 | const char *mode; |
2530 | FILE *output = stderr; |
2531 | unsigned int interval, timeout; |
2532 | const char * const stat_subcommands[] = { "record" , "report" }; |
2533 | char errbuf[BUFSIZ]; |
2534 | |
	setlocale(LC_ALL, "");
2536 | |
2537 | evsel_list = evlist__new(); |
2538 | if (evsel_list == NULL) |
2539 | return -ENOMEM; |
2540 | |
2541 | parse_events__shrink_config_terms(); |
2542 | |
2543 | /* String-parsing callback-based options would segfault when negated */ |
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
2547 | |
2548 | argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, |
2549 | (const char **) stat_usage, |
2550 | PARSE_OPT_STOP_AT_NON_OPTION); |
2551 | |
2552 | if (stat_config.csv_sep) { |
2553 | stat_config.csv_output = true; |
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
2556 | } else |
2557 | stat_config.csv_sep = DEFAULT_SEPARATOR; |
2558 | |
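	/*
	 * Accept abbreviated subcommands of at least three characters,
	 * e.g. "rec" for "record" and "rep" for "report".
	 */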
	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
2564 | return __cmd_report(argc, argv); |
2565 | |
2566 | interval = stat_config.interval; |
2567 | timeout = stat_config.timeout; |
2568 | |
2569 | /* |
2570 | * For record command the -o is already taken care of. |
2571 | */ |
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
2573 | output = NULL; |
2574 | |
2575 | if (output_name && output_fd) { |
2576 | fprintf(stderr, "cannot use both --output and --log-fd\n" ); |
2577 | parse_options_usage(stat_usage, stat_options, "o" , 1); |
2578 | parse_options_usage(NULL, stat_options, "log-fd" , 0); |
2579 | goto out; |
2580 | } |
2581 | |
2582 | if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { |
2583 | fprintf(stderr, "--metric-only is not supported with --per-thread\n" ); |
2584 | goto out; |
2585 | } |
2586 | |
2587 | if (stat_config.metric_only && stat_config.run_count > 1) { |
2588 | fprintf(stderr, "--metric-only is not supported with -r\n" ); |
2589 | goto out; |
2590 | } |
2591 | |
2592 | if (stat_config.walltime_run_table && stat_config.run_count <= 1) { |
2593 | fprintf(stderr, "--table is only supported with -r\n" ); |
2594 | parse_options_usage(stat_usage, stat_options, "r" , 1); |
2595 | parse_options_usage(NULL, stat_options, "table" , 0); |
2596 | goto out; |
2597 | } |
2598 | |
2599 | if (output_fd < 0) { |
2600 | fprintf(stderr, "argument to --log-fd must be a > 0\n" ); |
2601 | parse_options_usage(stat_usage, stat_options, "log-fd" , 0); |
2602 | goto out; |
2603 | } |
2604 | |
2605 | if (!output && !quiet) { |
2606 | struct timespec tm; |
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		if (!stat_config.json_output) {
			clock_gettime(CLOCK_REALTIME, &tm);
			fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
		}
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
2623 | return -errno; |
2624 | } |
2625 | } |
2626 | |
2627 | if (stat_config.interval_clear && !isatty(fileno(output))) { |
2628 | fprintf(stderr, "--interval-clear does not work with output\n" ); |
2629 | parse_options_usage(stat_usage, stat_options, "o" , 1); |
2630 | parse_options_usage(NULL, stat_options, "log-fd" , 0); |
2631 | parse_options_usage(NULL, stat_options, "interval-clear" , 0); |
2632 | return -1; |
2633 | } |
2634 | |
2635 | stat_config.output = output; |
2636 | |
2637 | /* |
2638 | * let the spreadsheet do the pretty-printing |
2639 | */ |
2640 | if (stat_config.csv_output) { |
2641 | /* User explicitly passed -B? */ |
2642 | if (big_num_opt == 1) { |
2643 | fprintf(stderr, "-B option not supported with -x\n" ); |
2644 | parse_options_usage(stat_usage, stat_options, "B" , 1); |
2645 | parse_options_usage(NULL, stat_options, "x" , 1); |
2646 | goto out; |
2647 | } else /* Nope, so disable big number formatting */ |
2648 | stat_config.big_num = false; |
2649 | } else if (big_num_opt == 0) /* User passed --no-big-num */ |
2650 | stat_config.big_num = false; |
2651 | |
	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
2656 | } |
2657 | |
2658 | setup_system_wide(argc); |
2659 | |
2660 | /* |
2661 | * Display user/system times only for single |
2662 | * run and when there's specified tracee. |
2663 | */ |
	if ((stat_config.run_count == 1) && target__none(&target))
2665 | stat_config.ru_display = true; |
2666 | |
2667 | if (stat_config.run_count < 0) { |
2668 | pr_err("Run count must be a positive number\n" ); |
2669 | parse_options_usage(stat_usage, stat_options, "r" , 1); |
2670 | goto out; |
2671 | } else if (stat_config.run_count == 0) { |
2672 | forever = true; |
2673 | stat_config.run_count = 1; |
2674 | } |
2675 | |
2676 | if (stat_config.walltime_run_table) { |
2677 | stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); |
2678 | if (!stat_config.walltime_run) { |
2679 | pr_err("failed to setup -r option" ); |
2680 | goto out; |
2681 | } |
2682 | } |
2683 | |
2684 | if ((stat_config.aggr_mode == AGGR_THREAD) && |
	    !target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
2692 | goto out; |
2693 | } |
2694 | } |
2695 | |
2696 | /* |
2697 | * no_aggr, cgroup are for system-wide only |
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
2699 | */ |
2700 | if (((stat_config.aggr_mode != AGGR_GLOBAL && |
2701 | stat_config.aggr_mode != AGGR_THREAD) || |
2702 | (nr_cgroups || stat_config.cgroup_list)) && |
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
2711 | goto out; |
2712 | } |
2713 | |
2714 | if (stat_config.iostat_run) { |
		status = iostat_prepare(evsel_list, &stat_config);
		if (status)
			goto out;
		if (iostat_mode == IOSTAT_LIST) {
			iostat_list(evsel_list, &stat_config);
			goto out;
		} else if (verbose > 0)
			iostat_list(evsel_list, &stat_config);
		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
2724 | target.system_wide = true; |
2725 | } |
2726 | |
2727 | if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) |
2728 | target.per_thread = true; |
2729 | |
2730 | stat_config.system_wide = target.system_wide; |
2731 | if (target.cpu_list) { |
2732 | stat_config.user_requested_cpu_list = strdup(target.cpu_list); |
2733 | if (!stat_config.user_requested_cpu_list) { |
2734 | status = -ENOMEM; |
2735 | goto out; |
2736 | } |
2737 | } |
2738 | |
2739 | /* |
2740 | * Metric parsing needs to be delayed as metrics may optimize events |
2741 | * knowing the target is system-wide. |
2742 | */ |
2743 | if (metrics) { |
		const char *pmu = parse_events_option_args.pmu_filter ?: "all";
		int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
						    stat_config.metric_no_group,
						    stat_config.metric_no_merge,
						    stat_config.metric_no_threshold,
						    stat_config.user_requested_cpu_list,
						    stat_config.system_wide,
						    &stat_config.metric_events);
2752 | |
2753 | zfree(&metrics); |
2754 | if (ret) { |
2755 | status = ret; |
2756 | goto out; |
2757 | } |
2758 | } |
2759 | |
2760 | if (add_default_attributes()) |
2761 | goto out; |
2762 | |
2763 | if (stat_config.cgroup_list) { |
2764 | if (nr_cgroups > 0) { |
2765 | pr_err("--cgroup and --for-each-cgroup cannot be used together\n" ); |
2766 | parse_options_usage(stat_usage, stat_options, "G" , 1); |
2767 | parse_options_usage(NULL, stat_options, "for-each-cgroup" , 0); |
2768 | goto out; |
2769 | } |
2770 | |
		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
					  &stat_config.metric_events, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
2775 | goto out; |
2776 | } |
2777 | } |
2778 | |
	evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
2780 | |
	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
2790 | } |
2791 | goto out; |
2792 | } |
2793 | |
	evlist__check_cpu_maps(evsel_list);
2795 | |
2796 | /* |
2797 | * Initialize thread_map with comm names, |
2798 | * so we could print it out on output. |
2799 | */ |
	if (stat_config.aggr_mode == AGGR_THREAD)
		thread_map__read_comms(evsel_list->core.threads);
2803 | |
2804 | if (stat_config.aggr_mode == AGGR_NODE) |
2805 | cpu__setup_cpunode_map(); |
2806 | |
2807 | if (stat_config.times && interval) |
2808 | interval_count = true; |
2809 | else if (stat_config.times && !interval) { |
2810 | pr_err("interval-count option should be used together with " |
2811 | "interval-print.\n" ); |
2812 | parse_options_usage(stat_usage, stat_options, "interval-count" , 0); |
2813 | parse_options_usage(stat_usage, stat_options, "I" , 1); |
2814 | goto out; |
2815 | } |
2816 | |
2817 | if (timeout && timeout < 100) { |
2818 | if (timeout < 10) { |
2819 | pr_err("timeout must be >= 10ms.\n" ); |
2820 | parse_options_usage(stat_usage, stat_options, "timeout" , 0); |
2821 | goto out; |
2822 | } else |
2823 | pr_warning("timeout < 100ms. " |
2824 | "The overhead percentage could be high in some cases. " |
2825 | "Please proceed with caution.\n" ); |
2826 | } |
2827 | if (timeout && interval) { |
2828 | pr_err("timeout option is not supported with interval-print.\n" ); |
2829 | parse_options_usage(stat_usage, stat_options, "timeout" , 0); |
2830 | parse_options_usage(stat_usage, stat_options, "I" , 1); |
2831 | goto out; |
2832 | } |
2833 | |
2834 | if (perf_stat_init_aggr_mode()) |
2835 | goto out; |
2836 | |
	if (evlist__alloc_stats(&stat_config, evsel_list, interval))
2838 | goto out; |
2839 | |
2840 | /* |
2841 | * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless |
2842 | * while avoiding that older tools show confusing messages. |
2843 | * |
2844 | * However for pipe sessions we need to keep it zero, |
2845 | * because script's perf_evsel__check_attr is triggered |
2846 | * by attr->sample_type != 0, and we can't run it on |
2847 | * stat sessions. |
2848 | */ |
2849 | stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); |
2850 | |
2851 | /* |
	 * We don't want to block the signals - that would cause
2853 | * child tasks to inherit that and Ctrl-C would not work. |
2854 | * What we want is for Ctrl-C to work in the exec()-ed |
2855 | * task, but being ignored by perf stat itself: |
2856 | */ |
2857 | atexit(sig_atexit); |
2858 | if (!forever) |
2859 | signal(SIGINT, skip_signal); |
2860 | signal(SIGCHLD, skip_signal); |
2861 | signal(SIGALRM, skip_signal); |
2862 | signal(SIGABRT, skip_signal); |
2863 | |
	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
2865 | goto out; |
2866 | |
2867 | /* Enable ignoring missing threads when -p option is defined. */ |
	evlist__first(evsel_list)->ignore_missing_thread = target.pid;
2869 | status = 0; |
2870 | for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { |
2871 | if (stat_config.run_count != 1 && verbose > 0) |
2872 | fprintf(output, "[ perf stat: executing run #%d ... ]\n" , |
2873 | run_idx + 1); |
2874 | |
2875 | if (run_idx != 0) |
			evlist__reset_prev_raw_counts(evsel_list);
2877 | |
2878 | status = run_perf_stat(argc, argv, run_idx); |
2879 | if (forever && status != -1 && !interval) { |
2880 | print_counters(NULL, argc, argv); |
2881 | perf_stat__reset_stats(); |
2882 | } |
2883 | } |
2884 | |
2885 | if (!forever && status != -1 && (!interval || stat_config.summary)) { |
2886 | if (stat_config.run_count > 1) |
			evlist__copy_res_stats(&stat_config, evsel_list);
2888 | print_counters(NULL, argc, argv); |
2889 | } |
2890 | |
	evlist__finalize_ctlfd(evsel_list);
2892 | |
2893 | if (STAT_RECORD) { |
2894 | /* |
2895 | * We synthesize the kernel mmap record just so that older tools |
2896 | * don't emit warnings about not being able to resolve symbols |
2897 | * due to /proc/sys/kernel/kptr_restrict settings and instead provide |
2898 | * a saner message about no samples being in the perf.data file. |
2899 | * |
2900 | * This also serves to suppress a warning about f_header.data.size == 0 |
2901 | * in header.c at the moment 'perf stat record' gets introduced, which |
2902 | * is not really needed once we start adding the stat specific PERF_RECORD_ |
2903 | * records, but the need to suppress the kptr_restrict messages in older |
2904 | * tools remain -acme |
2905 | */ |
		int fd = perf_data__fd(&perf_stat.data);
2907 | |
		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							 process_synthesized_event,
							 &perf_stat.session->machines.host);
2911 | if (err) { |
2912 | pr_warning("Couldn't synthesize the kernel mmap record, harmless, " |
2913 | "older tools may produce warnings about this file\n." ); |
2914 | } |
2915 | |
2916 | if (!interval) { |
2917 | if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) |
2918 | pr_err("failed to write stat round event\n" ); |
2919 | } |
2920 | |
2921 | if (!perf_stat.data.is_pipe) { |
2922 | perf_stat.session->header.data_size += perf_stat.bytes_written; |
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2924 | } |
2925 | |
		evlist__close(evsel_list);
		perf_session__delete(perf_stat.session);
2928 | } |
2929 | |
2930 | perf_stat__exit_aggr_mode(); |
	evlist__free_stats(evsel_list);
2932 | out: |
2933 | if (stat_config.iostat_run) |
		iostat_release(evsel_list);
2935 | |
2936 | zfree(&stat_config.walltime_run); |
2937 | zfree(&stat_config.user_requested_cpu_list); |
2938 | |
2939 | if (smi_cost && smi_reset) |
2940 | sysfs__write_int(FREEZE_ON_SMI_PATH, 0); |
2941 | |
	evlist__delete(evsel_list);
2943 | |
	metricgroup__rblist_exit(&stat_config.metric_events);
	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
2946 | |
2947 | return status; |
2948 | } |
2949 | |