// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-config.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"
#include "coresight-trace-id.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

/*
 * An ETM context for a running event includes the perf aux handle
 * and aux_data. For ETM, the aux_data (etm_event_data) consists of
 * the trace path and the sink configuration. The event data is accessible
 * via perf_get_aux(handle). However, a sink could "end" a perf output
 * handle via the IRQ handler. And if the "sink" encounters a failure
 * to "begin" another session (e.g. due to lack of space in the buffer),
 * the handle will be cleared. Thus, the event_data may not be accessible
 * from the handle when we get to etm_event_stop(), which is required
 * for stopping the trace path. The event_data is guaranteed to stay alive
 * until "free_aux()", which cannot happen as long as the event is active on
 * the ETM. Thus the event_data for the session must be part of the ETM context
 * to make sure we can disable the trace path.
 */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply them to all ETMs.
 */
PMU_FORMAT_ATTR(branch_broadcast, "config:" __stringify(ETM_OPT_BRANCH_BROADCAST));
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* preset - if sink ID is used as a configuration selector */
PMU_FORMAT_ATTR(preset, "config:0-3");
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
/* config ID - set if a system configuration is selected */
PMU_FORMAT_ATTR(configid, "config2:32-63");
PMU_FORMAT_ATTR(cc_threshold, "config3:0-11");
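
/*
 * For reference, user space drives the format bits above through
 * perf_event_attr.config*, e.g. (illustrative command):
 *
 *	perf record -e cs_etm/cycacc,timestamp/ -- <cmd>
 *
 * which sets the ETM_OPT_CYCACC and ETM_OPT_TS bits in attr.config.
 */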

/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

static struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	&format_attr_preset.attr,
	&format_attr_configid.attr,
	&format_attr_branch_broadcast.attr,
	&format_attr_cc_threshold.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

static struct attribute *etm_config_events_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_events_group = {
	.name = "events",
	.attrs = etm_config_events_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	&etm_pmu_events_group,
	NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}
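
/*
 * User space discovers the dynamically allocated PMU type, typically by
 * reading /sys/bus/event_source/devices/cs_etm/type, and passes it in
 * perf_event_attr.type; any other type is rejected above with -ENOENT.
 */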

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	/* clear any configuration we were using */
	if (event_data->cfg_hash)
		cscfg_deactivate_config(event_data->cfg_hash);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
		coresight_trace_id_put_cpu_id(cpu);
	}

	/* mark perf event as done for trace id allocator */
	coresight_trace_id_perf_stop();

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}
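
/*
 * Note: the actual teardown runs from the workqueue (free_event_data())
 * rather than here - presumably because free_aux() can be invoked from
 * contexts where releasing the trace path, which takes sleeping locks,
 * would not be safe.
 */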

/*
 * Check if two given sinks are compatible with each other,
 * so that they can use the same sink buffers, when an event
 * moves around.
 */
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;
	/*
	 * If the sinks are of the same subtype and driven
	 * by the same driver, we can use the same buffer
	 * on these sinks.
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id, cfg_hash;
	int cpu = event->cpu;
	int trace_id;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2 & GENMASK_ULL(31, 0)) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	/* tell the trace ID allocator that a perf event is starting up */
	coresight_trace_id_perf_start();

	/* check if user wants a coresight configuration selected */
	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
	if (cfg_hash) {
		if (cscfg_activate_config(cfg_hash))
			goto err;
		event_data->cfg_hash = cfg_hash;
	}

	mask = &event_data->mask;

	/*
	 * Set up the path for each CPU in the trace session. We try to build
	 * a trace path for each CPU in the mask. If we don't find an ETM
	 * for a CPU or fail to build a path, we clear that CPU from the
	 * mask and continue with the rest. If we ever try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU clear it from
		 * the mask and continue with the rest. If we ever try to
		 * trace on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for all the
		 * ETMs where this event can be scheduled.
		 * We allocate the sink specific buffers only once for this
		 * event. If the ETMs have different default sink devices, we
		 * can only use a single "type" of sink as the event can carry
		 * only one sink specific buffer. Thus we have to make sure
		 * that the sinks are of the same type and driven by the same
		 * driver, as the one we allocate the buffer for. As such
		 * we choose the first sink and check if the remaining ETMs
		 * have a compatible default sink. We don't trace on a CPU
		 * if its sink is not compatible.
		 */
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* Check if this sink is compatible with the last one */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/* ensure we can allocate a trace ID for this CPU */
		trace_id = coresight_trace_id_get_cpu_id(cpu);
		if (!IS_VALID_CS_TRACE_ID(trace_id)) {
			cpumask_clear_cpu(cpu, mask);
			coresight_release_path(path);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/*
	 * Allocate the sink buffer for this session. All the sinks
	 * where this event can be scheduled are ensured to be of the
	 * same type. Thus the same sink configuration is used by all
	 * the sinks.
	 */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}
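
/*
 * For reference, the attr.config2 layout parsed above, as a sketch:
 *
 *	attr.config2 = sink_id | ((u64)cfg_hash << 32);
 *
 * where sink_id and cfg_hash are hashlen_hash() values of a sink's or a
 * configuration's name - the same hashes published through the "sinks"
 * and "events" sysfs groups by etm_perf_add_symlink_group() below.
 */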

static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;
	u64 hw_id;

	if (!csdev)
		goto fail;

	/* Have we messed up our tracking? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 *
	 * As such we pretend that everything is fine, and let
	 * it continue without actually tracing. The event could
	 * resume tracing when it moves to a CPU from which a
	 * sink is reachable.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto out;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

	/*
	 * output cpu / trace ID in perf record, once for the lifetime
	 * of the event.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
		cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
		hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK,
				   CS_AUX_HW_ID_CURR_VERSION);
		hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK,
				    coresight_trace_id_read_cpu_id(cpu));
		perf_report_aux_output_id(event, hw_id);
	}

out:
	/* Tell the perf core the event is alive */
	event->hw.state = 0;
	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	/*
	 * Check if the handle is still associated with the event,
	 * to handle cases where the sink failed to start the
	 * trace and has already TRUNCATED the handle.
	 */
	if (READ_ONCE(handle->event)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		perf_aux_output_end(handle, 0);
	}
fail:
	event->hw.state = PERF_HES_STOPPED;
	return;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	/*
	 * Check if this ETM was allowed to trace, as decided at
	 * etm_setup_aux(). If it wasn't allowed to trace, then
	 * nothing needs to be torn down other than outputting a
	 * zero sized record.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE) &&
	    !cpumask_test_cpu(cpu, &event_data->mask)) {
		event->hw.state = PERF_HES_STOPPED;
		perf_aux_output_end(handle, 0);
		return;
	}

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	coresight_disable_source(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * If the handle is not bound to an event anymore
	 * (e.g. the sink driver was unable to restart the
	 * handle due to lack of buffer space), we don't
	 * have to do anything here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		/*
		 * Make sure the handle is still valid as the
		 * sink could have closed it from an IRQ.
		 * The sink driver must handle the race between
		 * update_buffer() and the IRQ. Thus we should
		 * either get a valid handle and a valid size
		 * (which may be 0), or an invalid handle with
		 * a zero size.
		 *
		 * We should never get a non-zero size with an
		 * invalid handle.
		 */
		if (READ_ONCE(handle->event))
			perf_aux_output_end(handle, size);
		else
			WARN_ON(size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size == 0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to cohabitate, they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}
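
/*
 * For reference, these filters are normally programmed from user space
 * with the perf tool's --filter option, e.g. (illustrative command):
 *
 *	perf record -e cs_etm// --filter 'filter main @ /bin/ls' -- ls
 *
 * which arrives here as a PERF_ADDR_FILTER_ACTION_FILTER range and is
 * translated to an ETM_ADDR_TYPE_RANGE entry above.
 */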

int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);
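
/*
 * The symlinks created above show up under the PMU's sysfs directory,
 * typically /sys/bus/event_source/devices/cs_etm/cpuN, each pointing at
 * the coresight device backing that CPU's tracer.
 */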

static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

static struct dev_ext_attribute *
etm_perf_add_symlink_group(struct device *dev, const char *name, const char *group_name)
{
	struct dev_ext_attribute *ea;
	unsigned long hash;
	int ret;
	struct device *pmu_dev = etm_pmu.dev;

	if (!etm_perf_up)
		return ERR_PTR(-EPROBE_DEFER);

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return ERR_PTR(-ENOMEM);

	/*
	 * If this function is called to add a sink, the hash is used for
	 * sink selection - see coresight_get_sink_by_id().
	 * If adding a configuration, the hash is used for selection in
	 * cscfg_activate_config().
	 */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return ERR_PTR(-ENOMEM);

	ea->attr.attr.mode = 0444;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, group_name);

	return ret ? ERR_PTR(ret) : ea;
}

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	const char *name;
	struct device *dev = &csdev->dev;
	int err = 0;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	name = dev_name(dev);
	csdev->ea = etm_perf_add_symlink_group(dev, name, "sinks");
	if (IS_ERR(csdev->ea)) {
		err = PTR_ERR(csdev->ea);
		csdev->ea = NULL;
	} else
		csdev->ea->attr.show = etm_perf_sink_name_show;

	return err;
}
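
/*
 * With the sink's name published in the "sinks" group, user space can
 * direct a session at a specific sink by name, e.g. (the sink name below
 * is an example and depends on the platform):
 *
 *	perf record -e cs_etm/@tmc_etr0/ -- <cmd>
 *
 * The perf tool hashes the name into attr.config2 (the sinkid field) and
 * coresight_get_sink_by_id() resolves it back in etm_setup_aux().
 */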

static void etm_perf_del_symlink_group(struct dev_ext_attribute *ea, const char *group_name)
{
	struct device *pmu_dev = etm_pmu.dev;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, group_name);
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!csdev->ea)
		return;

	etm_perf_del_symlink_group(csdev->ea, "sinks");
	csdev->ea = NULL;
}

static ssize_t etm_perf_cscfg_event_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
{
	int err = 0;

	if (config_desc->event_ea != NULL)
		return 0;

	config_desc->event_ea = etm_perf_add_symlink_group(dev, config_desc->name, "events");

	/* set the show function to the custom cscfg event */
	if (!IS_ERR(config_desc->event_ea))
		config_desc->event_ea->attr.show = etm_perf_cscfg_event_show;
	else {
		err = PTR_ERR(config_desc->event_ea);
		config_desc->event_ea = NULL;
	}

	return err;
}
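
/*
 * Once a configuration is published in the "events" group it can be
 * selected by name on the command line, optionally with a preset, e.g.
 * (using the built-in "autofdo" configuration as an example):
 *
 *	perf record -e cs_etm/autofdo,preset=1/ -- <cmd>
 *
 * The perf tool reads the configid hash from the event file and places
 * it in the upper 32 bits of attr.config2.
 */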

void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc)
{
	if (!config_desc->event_ea)
		return;

	etm_perf_del_symlink_group(config_desc->event_ea, "events");
	config_desc->event_ea = NULL;
}

int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities		= (PERF_PMU_CAP_EXCLUSIVE |
					   PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups		= etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr		= perf_sw_context;
	etm_pmu.read			= etm_event_read;
	etm_pmu.event_init		= etm_event_init;
	etm_pmu.setup_aux		= etm_setup_aux;
	etm_pmu.free_aux		= etm_free_aux;
	etm_pmu.start			= etm_event_start;
	etm_pmu.stop			= etm_event_stop;
	etm_pmu.add			= etm_event_add;
	etm_pmu.del			= etm_event_del;
	etm_pmu.addr_filters_sync	= etm_addr_filters_sync;
	etm_pmu.addr_filters_validate	= etm_addr_filters_validate;
	etm_pmu.nr_addr_filters	= ETM_ADDR_CMP_MAX;
	etm_pmu.module			= THIS_MODULE;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}
920 | |