1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
5 | */ |
6 | |
7 | #include <linux/pid_namespace.h> |
8 | #include <linux/pm_runtime.h> |
9 | #include <linux/sysfs.h> |
10 | #include "coresight-etm.h" |
11 | #include "coresight-priv.h" |
12 | |
13 | static ssize_t nr_addr_cmp_show(struct device *dev, |
14 | struct device_attribute *attr, char *buf) |
15 | { |
16 | unsigned long val; |
17 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
18 | |
19 | val = drvdata->nr_addr_cmp; |
20 | return sprintf(buf, fmt: "%#lx\n" , val); |
21 | } |
22 | static DEVICE_ATTR_RO(nr_addr_cmp); |
23 | |
24 | static ssize_t nr_cntr_show(struct device *dev, |
25 | struct device_attribute *attr, char *buf) |
26 | { unsigned long val; |
27 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
28 | |
29 | val = drvdata->nr_cntr; |
30 | return sprintf(buf, fmt: "%#lx\n" , val); |
31 | } |
32 | static DEVICE_ATTR_RO(nr_cntr); |
33 | |
34 | static ssize_t nr_ctxid_cmp_show(struct device *dev, |
35 | struct device_attribute *attr, char *buf) |
36 | { |
37 | unsigned long val; |
38 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
39 | |
40 | val = drvdata->nr_ctxid_cmp; |
41 | return sprintf(buf, fmt: "%#lx\n" , val); |
42 | } |
43 | static DEVICE_ATTR_RO(nr_ctxid_cmp); |
44 | |
45 | static ssize_t etmsr_show(struct device *dev, |
46 | struct device_attribute *attr, char *buf) |
47 | { |
48 | unsigned long flags, val; |
49 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
50 | |
51 | pm_runtime_get_sync(dev: dev->parent); |
52 | spin_lock_irqsave(&drvdata->spinlock, flags); |
53 | CS_UNLOCK(addr: drvdata->base); |
54 | |
55 | val = etm_readl(drvdata, ETMSR); |
56 | |
57 | CS_LOCK(addr: drvdata->base); |
58 | spin_unlock_irqrestore(lock: &drvdata->spinlock, flags); |
59 | pm_runtime_put(dev: dev->parent); |
60 | |
61 | return sprintf(buf, fmt: "%#lx\n" , val); |
62 | } |
63 | static DEVICE_ATTR_RO(etmsr); |
64 | |
65 | static ssize_t reset_store(struct device *dev, |
66 | struct device_attribute *attr, |
67 | const char *buf, size_t size) |
68 | { |
69 | int i, ret; |
70 | unsigned long val; |
71 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
72 | struct etm_config *config = &drvdata->config; |
73 | |
74 | ret = kstrtoul(s: buf, base: 16, res: &val); |
75 | if (ret) |
76 | return ret; |
77 | |
78 | if (val) { |
79 | spin_lock(lock: &drvdata->spinlock); |
80 | memset(config, 0, sizeof(struct etm_config)); |
81 | config->mode = ETM_MODE_EXCLUDE; |
82 | config->trigger_event = ETM_DEFAULT_EVENT_VAL; |
83 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { |
84 | config->addr_type[i] = ETM_ADDR_TYPE_NONE; |
85 | } |
86 | |
87 | etm_set_default(config); |
88 | etm_release_trace_id(drvdata); |
89 | spin_unlock(lock: &drvdata->spinlock); |
90 | } |
91 | |
92 | return size; |
93 | } |
94 | static DEVICE_ATTR_WO(reset); |
95 | |
96 | static ssize_t mode_show(struct device *dev, |
97 | struct device_attribute *attr, char *buf) |
98 | { |
99 | unsigned long val; |
100 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
101 | struct etm_config *config = &drvdata->config; |
102 | |
103 | val = config->mode; |
104 | return sprintf(buf, fmt: "%#lx\n" , val); |
105 | } |
106 | |
107 | static ssize_t mode_store(struct device *dev, |
108 | struct device_attribute *attr, |
109 | const char *buf, size_t size) |
110 | { |
111 | int ret; |
112 | unsigned long val; |
113 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
114 | struct etm_config *config = &drvdata->config; |
115 | |
116 | ret = kstrtoul(s: buf, base: 16, res: &val); |
117 | if (ret) |
118 | return ret; |
119 | |
120 | spin_lock(lock: &drvdata->spinlock); |
121 | config->mode = val & ETM_MODE_ALL; |
122 | |
123 | if (config->mode & ETM_MODE_EXCLUDE) |
124 | config->enable_ctrl1 |= ETMTECR1_INC_EXC; |
125 | else |
126 | config->enable_ctrl1 &= ~ETMTECR1_INC_EXC; |
127 | |
128 | if (config->mode & ETM_MODE_CYCACC) |
129 | config->ctrl |= ETMCR_CYC_ACC; |
130 | else |
131 | config->ctrl &= ~ETMCR_CYC_ACC; |
132 | |
133 | if (config->mode & ETM_MODE_STALL) { |
134 | if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { |
135 | dev_warn(dev, "stall mode not supported\n" ); |
136 | ret = -EINVAL; |
137 | goto err_unlock; |
138 | } |
139 | config->ctrl |= ETMCR_STALL_MODE; |
140 | } else |
141 | config->ctrl &= ~ETMCR_STALL_MODE; |
142 | |
143 | if (config->mode & ETM_MODE_TIMESTAMP) { |
144 | if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { |
145 | dev_warn(dev, "timestamp not supported\n" ); |
146 | ret = -EINVAL; |
147 | goto err_unlock; |
148 | } |
149 | config->ctrl |= ETMCR_TIMESTAMP_EN; |
150 | } else |
151 | config->ctrl &= ~ETMCR_TIMESTAMP_EN; |
152 | |
153 | if (config->mode & ETM_MODE_CTXID) |
154 | config->ctrl |= ETMCR_CTXID_SIZE; |
155 | else |
156 | config->ctrl &= ~ETMCR_CTXID_SIZE; |
157 | |
158 | if (config->mode & ETM_MODE_BBROAD) |
159 | config->ctrl |= ETMCR_BRANCH_BROADCAST; |
160 | else |
161 | config->ctrl &= ~ETMCR_BRANCH_BROADCAST; |
162 | |
163 | if (config->mode & ETM_MODE_RET_STACK) |
164 | config->ctrl |= ETMCR_RETURN_STACK; |
165 | else |
166 | config->ctrl &= ~ETMCR_RETURN_STACK; |
167 | |
168 | if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) |
169 | etm_config_trace_mode(config); |
170 | |
171 | spin_unlock(lock: &drvdata->spinlock); |
172 | |
173 | return size; |
174 | |
175 | err_unlock: |
176 | spin_unlock(lock: &drvdata->spinlock); |
177 | return ret; |
178 | } |
179 | static DEVICE_ATTR_RW(mode); |
180 | |
181 | static ssize_t trigger_event_show(struct device *dev, |
182 | struct device_attribute *attr, char *buf) |
183 | { |
184 | unsigned long val; |
185 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
186 | struct etm_config *config = &drvdata->config; |
187 | |
188 | val = config->trigger_event; |
189 | return sprintf(buf, fmt: "%#lx\n" , val); |
190 | } |
191 | |
192 | static ssize_t trigger_event_store(struct device *dev, |
193 | struct device_attribute *attr, |
194 | const char *buf, size_t size) |
195 | { |
196 | int ret; |
197 | unsigned long val; |
198 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
199 | struct etm_config *config = &drvdata->config; |
200 | |
201 | ret = kstrtoul(s: buf, base: 16, res: &val); |
202 | if (ret) |
203 | return ret; |
204 | |
205 | config->trigger_event = val & ETM_EVENT_MASK; |
206 | |
207 | return size; |
208 | } |
209 | static DEVICE_ATTR_RW(trigger_event); |
210 | |
211 | static ssize_t enable_event_show(struct device *dev, |
212 | struct device_attribute *attr, char *buf) |
213 | { |
214 | unsigned long val; |
215 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
216 | struct etm_config *config = &drvdata->config; |
217 | |
218 | val = config->enable_event; |
219 | return sprintf(buf, fmt: "%#lx\n" , val); |
220 | } |
221 | |
222 | static ssize_t enable_event_store(struct device *dev, |
223 | struct device_attribute *attr, |
224 | const char *buf, size_t size) |
225 | { |
226 | int ret; |
227 | unsigned long val; |
228 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
229 | struct etm_config *config = &drvdata->config; |
230 | |
231 | ret = kstrtoul(s: buf, base: 16, res: &val); |
232 | if (ret) |
233 | return ret; |
234 | |
235 | config->enable_event = val & ETM_EVENT_MASK; |
236 | |
237 | return size; |
238 | } |
239 | static DEVICE_ATTR_RW(enable_event); |
240 | |
241 | static ssize_t fifofull_level_show(struct device *dev, |
242 | struct device_attribute *attr, char *buf) |
243 | { |
244 | unsigned long val; |
245 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
246 | struct etm_config *config = &drvdata->config; |
247 | |
248 | val = config->fifofull_level; |
249 | return sprintf(buf, fmt: "%#lx\n" , val); |
250 | } |
251 | |
252 | static ssize_t fifofull_level_store(struct device *dev, |
253 | struct device_attribute *attr, |
254 | const char *buf, size_t size) |
255 | { |
256 | int ret; |
257 | unsigned long val; |
258 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
259 | struct etm_config *config = &drvdata->config; |
260 | |
261 | ret = kstrtoul(s: buf, base: 16, res: &val); |
262 | if (ret) |
263 | return ret; |
264 | |
265 | config->fifofull_level = val; |
266 | |
267 | return size; |
268 | } |
269 | static DEVICE_ATTR_RW(fifofull_level); |
270 | |
271 | static ssize_t addr_idx_show(struct device *dev, |
272 | struct device_attribute *attr, char *buf) |
273 | { |
274 | unsigned long val; |
275 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
276 | struct etm_config *config = &drvdata->config; |
277 | |
278 | val = config->addr_idx; |
279 | return sprintf(buf, fmt: "%#lx\n" , val); |
280 | } |
281 | |
282 | static ssize_t addr_idx_store(struct device *dev, |
283 | struct device_attribute *attr, |
284 | const char *buf, size_t size) |
285 | { |
286 | int ret; |
287 | unsigned long val; |
288 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
289 | struct etm_config *config = &drvdata->config; |
290 | |
291 | ret = kstrtoul(s: buf, base: 16, res: &val); |
292 | if (ret) |
293 | return ret; |
294 | |
295 | if (val >= drvdata->nr_addr_cmp) |
296 | return -EINVAL; |
297 | |
298 | /* |
299 | * Use spinlock to ensure index doesn't change while it gets |
300 | * dereferenced multiple times within a spinlock block elsewhere. |
301 | */ |
302 | spin_lock(lock: &drvdata->spinlock); |
303 | config->addr_idx = val; |
304 | spin_unlock(lock: &drvdata->spinlock); |
305 | |
306 | return size; |
307 | } |
308 | static DEVICE_ATTR_RW(addr_idx); |
309 | |
310 | static ssize_t addr_single_show(struct device *dev, |
311 | struct device_attribute *attr, char *buf) |
312 | { |
313 | u8 idx; |
314 | unsigned long val; |
315 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
316 | struct etm_config *config = &drvdata->config; |
317 | |
318 | spin_lock(lock: &drvdata->spinlock); |
319 | idx = config->addr_idx; |
320 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || |
321 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { |
322 | spin_unlock(lock: &drvdata->spinlock); |
323 | return -EINVAL; |
324 | } |
325 | |
326 | val = config->addr_val[idx]; |
327 | spin_unlock(lock: &drvdata->spinlock); |
328 | |
329 | return sprintf(buf, fmt: "%#lx\n" , val); |
330 | } |
331 | |
332 | static ssize_t addr_single_store(struct device *dev, |
333 | struct device_attribute *attr, |
334 | const char *buf, size_t size) |
335 | { |
336 | u8 idx; |
337 | int ret; |
338 | unsigned long val; |
339 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
340 | struct etm_config *config = &drvdata->config; |
341 | |
342 | ret = kstrtoul(s: buf, base: 16, res: &val); |
343 | if (ret) |
344 | return ret; |
345 | |
346 | spin_lock(lock: &drvdata->spinlock); |
347 | idx = config->addr_idx; |
348 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || |
349 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { |
350 | spin_unlock(lock: &drvdata->spinlock); |
351 | return -EINVAL; |
352 | } |
353 | |
354 | config->addr_val[idx] = val; |
355 | config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; |
356 | spin_unlock(lock: &drvdata->spinlock); |
357 | |
358 | return size; |
359 | } |
360 | static DEVICE_ATTR_RW(addr_single); |
361 | |
362 | static ssize_t addr_range_show(struct device *dev, |
363 | struct device_attribute *attr, char *buf) |
364 | { |
365 | u8 idx; |
366 | unsigned long val1, val2; |
367 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
368 | struct etm_config *config = &drvdata->config; |
369 | |
370 | spin_lock(lock: &drvdata->spinlock); |
371 | idx = config->addr_idx; |
372 | if (idx % 2 != 0) { |
373 | spin_unlock(lock: &drvdata->spinlock); |
374 | return -EPERM; |
375 | } |
376 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && |
377 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || |
378 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && |
379 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { |
380 | spin_unlock(lock: &drvdata->spinlock); |
381 | return -EPERM; |
382 | } |
383 | |
384 | val1 = config->addr_val[idx]; |
385 | val2 = config->addr_val[idx + 1]; |
386 | spin_unlock(lock: &drvdata->spinlock); |
387 | |
388 | return sprintf(buf, fmt: "%#lx %#lx\n" , val1, val2); |
389 | } |
390 | |
391 | static ssize_t addr_range_store(struct device *dev, |
392 | struct device_attribute *attr, |
393 | const char *buf, size_t size) |
394 | { |
395 | u8 idx; |
396 | unsigned long val1, val2; |
397 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
398 | struct etm_config *config = &drvdata->config; |
399 | |
400 | if (sscanf(buf, "%lx %lx" , &val1, &val2) != 2) |
401 | return -EINVAL; |
402 | /* Lower address comparator cannot have a higher address value */ |
403 | if (val1 > val2) |
404 | return -EINVAL; |
405 | |
406 | spin_lock(lock: &drvdata->spinlock); |
407 | idx = config->addr_idx; |
408 | if (idx % 2 != 0) { |
409 | spin_unlock(lock: &drvdata->spinlock); |
410 | return -EPERM; |
411 | } |
412 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && |
413 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || |
414 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && |
415 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { |
416 | spin_unlock(lock: &drvdata->spinlock); |
417 | return -EPERM; |
418 | } |
419 | |
420 | config->addr_val[idx] = val1; |
421 | config->addr_type[idx] = ETM_ADDR_TYPE_RANGE; |
422 | config->addr_val[idx + 1] = val2; |
423 | config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; |
424 | config->enable_ctrl1 |= (1 << (idx/2)); |
425 | spin_unlock(lock: &drvdata->spinlock); |
426 | |
427 | return size; |
428 | } |
429 | static DEVICE_ATTR_RW(addr_range); |
430 | |
431 | static ssize_t addr_start_show(struct device *dev, |
432 | struct device_attribute *attr, char *buf) |
433 | { |
434 | u8 idx; |
435 | unsigned long val; |
436 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
437 | struct etm_config *config = &drvdata->config; |
438 | |
439 | spin_lock(lock: &drvdata->spinlock); |
440 | idx = config->addr_idx; |
441 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || |
442 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { |
443 | spin_unlock(lock: &drvdata->spinlock); |
444 | return -EPERM; |
445 | } |
446 | |
447 | val = config->addr_val[idx]; |
448 | spin_unlock(lock: &drvdata->spinlock); |
449 | |
450 | return sprintf(buf, fmt: "%#lx\n" , val); |
451 | } |
452 | |
453 | static ssize_t addr_start_store(struct device *dev, |
454 | struct device_attribute *attr, |
455 | const char *buf, size_t size) |
456 | { |
457 | u8 idx; |
458 | int ret; |
459 | unsigned long val; |
460 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
461 | struct etm_config *config = &drvdata->config; |
462 | |
463 | ret = kstrtoul(s: buf, base: 16, res: &val); |
464 | if (ret) |
465 | return ret; |
466 | |
467 | spin_lock(lock: &drvdata->spinlock); |
468 | idx = config->addr_idx; |
469 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || |
470 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { |
471 | spin_unlock(lock: &drvdata->spinlock); |
472 | return -EPERM; |
473 | } |
474 | |
475 | config->addr_val[idx] = val; |
476 | config->addr_type[idx] = ETM_ADDR_TYPE_START; |
477 | config->startstop_ctrl |= (1 << idx); |
478 | config->enable_ctrl1 |= ETMTECR1_START_STOP; |
479 | spin_unlock(lock: &drvdata->spinlock); |
480 | |
481 | return size; |
482 | } |
483 | static DEVICE_ATTR_RW(addr_start); |
484 | |
485 | static ssize_t addr_stop_show(struct device *dev, |
486 | struct device_attribute *attr, char *buf) |
487 | { |
488 | u8 idx; |
489 | unsigned long val; |
490 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
491 | struct etm_config *config = &drvdata->config; |
492 | |
493 | spin_lock(lock: &drvdata->spinlock); |
494 | idx = config->addr_idx; |
495 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || |
496 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { |
497 | spin_unlock(lock: &drvdata->spinlock); |
498 | return -EPERM; |
499 | } |
500 | |
501 | val = config->addr_val[idx]; |
502 | spin_unlock(lock: &drvdata->spinlock); |
503 | |
504 | return sprintf(buf, fmt: "%#lx\n" , val); |
505 | } |
506 | |
507 | static ssize_t addr_stop_store(struct device *dev, |
508 | struct device_attribute *attr, |
509 | const char *buf, size_t size) |
510 | { |
511 | u8 idx; |
512 | int ret; |
513 | unsigned long val; |
514 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
515 | struct etm_config *config = &drvdata->config; |
516 | |
517 | ret = kstrtoul(s: buf, base: 16, res: &val); |
518 | if (ret) |
519 | return ret; |
520 | |
521 | spin_lock(lock: &drvdata->spinlock); |
522 | idx = config->addr_idx; |
523 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || |
524 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { |
525 | spin_unlock(lock: &drvdata->spinlock); |
526 | return -EPERM; |
527 | } |
528 | |
529 | config->addr_val[idx] = val; |
530 | config->addr_type[idx] = ETM_ADDR_TYPE_STOP; |
531 | config->startstop_ctrl |= (1 << (idx + 16)); |
532 | config->enable_ctrl1 |= ETMTECR1_START_STOP; |
533 | spin_unlock(lock: &drvdata->spinlock); |
534 | |
535 | return size; |
536 | } |
537 | static DEVICE_ATTR_RW(addr_stop); |
538 | |
539 | static ssize_t addr_acctype_show(struct device *dev, |
540 | struct device_attribute *attr, char *buf) |
541 | { |
542 | unsigned long val; |
543 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
544 | struct etm_config *config = &drvdata->config; |
545 | |
546 | spin_lock(lock: &drvdata->spinlock); |
547 | val = config->addr_acctype[config->addr_idx]; |
548 | spin_unlock(lock: &drvdata->spinlock); |
549 | |
550 | return sprintf(buf, fmt: "%#lx\n" , val); |
551 | } |
552 | |
553 | static ssize_t addr_acctype_store(struct device *dev, |
554 | struct device_attribute *attr, |
555 | const char *buf, size_t size) |
556 | { |
557 | int ret; |
558 | unsigned long val; |
559 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
560 | struct etm_config *config = &drvdata->config; |
561 | |
562 | ret = kstrtoul(s: buf, base: 16, res: &val); |
563 | if (ret) |
564 | return ret; |
565 | |
566 | spin_lock(lock: &drvdata->spinlock); |
567 | config->addr_acctype[config->addr_idx] = val; |
568 | spin_unlock(lock: &drvdata->spinlock); |
569 | |
570 | return size; |
571 | } |
572 | static DEVICE_ATTR_RW(addr_acctype); |
573 | |
574 | static ssize_t cntr_idx_show(struct device *dev, |
575 | struct device_attribute *attr, char *buf) |
576 | { |
577 | unsigned long val; |
578 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
579 | struct etm_config *config = &drvdata->config; |
580 | |
581 | val = config->cntr_idx; |
582 | return sprintf(buf, fmt: "%#lx\n" , val); |
583 | } |
584 | |
585 | static ssize_t cntr_idx_store(struct device *dev, |
586 | struct device_attribute *attr, |
587 | const char *buf, size_t size) |
588 | { |
589 | int ret; |
590 | unsigned long val; |
591 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
592 | struct etm_config *config = &drvdata->config; |
593 | |
594 | ret = kstrtoul(s: buf, base: 16, res: &val); |
595 | if (ret) |
596 | return ret; |
597 | |
598 | if (val >= drvdata->nr_cntr) |
599 | return -EINVAL; |
600 | /* |
601 | * Use spinlock to ensure index doesn't change while it gets |
602 | * dereferenced multiple times within a spinlock block elsewhere. |
603 | */ |
604 | spin_lock(lock: &drvdata->spinlock); |
605 | config->cntr_idx = val; |
606 | spin_unlock(lock: &drvdata->spinlock); |
607 | |
608 | return size; |
609 | } |
610 | static DEVICE_ATTR_RW(cntr_idx); |
611 | |
612 | static ssize_t cntr_rld_val_show(struct device *dev, |
613 | struct device_attribute *attr, char *buf) |
614 | { |
615 | unsigned long val; |
616 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
617 | struct etm_config *config = &drvdata->config; |
618 | |
619 | spin_lock(lock: &drvdata->spinlock); |
620 | val = config->cntr_rld_val[config->cntr_idx]; |
621 | spin_unlock(lock: &drvdata->spinlock); |
622 | |
623 | return sprintf(buf, fmt: "%#lx\n" , val); |
624 | } |
625 | |
626 | static ssize_t cntr_rld_val_store(struct device *dev, |
627 | struct device_attribute *attr, |
628 | const char *buf, size_t size) |
629 | { |
630 | int ret; |
631 | unsigned long val; |
632 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
633 | struct etm_config *config = &drvdata->config; |
634 | |
635 | ret = kstrtoul(s: buf, base: 16, res: &val); |
636 | if (ret) |
637 | return ret; |
638 | |
639 | spin_lock(lock: &drvdata->spinlock); |
640 | config->cntr_rld_val[config->cntr_idx] = val; |
641 | spin_unlock(lock: &drvdata->spinlock); |
642 | |
643 | return size; |
644 | } |
645 | static DEVICE_ATTR_RW(cntr_rld_val); |
646 | |
647 | static ssize_t cntr_event_show(struct device *dev, |
648 | struct device_attribute *attr, char *buf) |
649 | { |
650 | unsigned long val; |
651 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
652 | struct etm_config *config = &drvdata->config; |
653 | |
654 | spin_lock(lock: &drvdata->spinlock); |
655 | val = config->cntr_event[config->cntr_idx]; |
656 | spin_unlock(lock: &drvdata->spinlock); |
657 | |
658 | return sprintf(buf, fmt: "%#lx\n" , val); |
659 | } |
660 | |
661 | static ssize_t cntr_event_store(struct device *dev, |
662 | struct device_attribute *attr, |
663 | const char *buf, size_t size) |
664 | { |
665 | int ret; |
666 | unsigned long val; |
667 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
668 | struct etm_config *config = &drvdata->config; |
669 | |
670 | ret = kstrtoul(s: buf, base: 16, res: &val); |
671 | if (ret) |
672 | return ret; |
673 | |
674 | spin_lock(lock: &drvdata->spinlock); |
675 | config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK; |
676 | spin_unlock(lock: &drvdata->spinlock); |
677 | |
678 | return size; |
679 | } |
680 | static DEVICE_ATTR_RW(cntr_event); |
681 | |
682 | static ssize_t cntr_rld_event_show(struct device *dev, |
683 | struct device_attribute *attr, char *buf) |
684 | { |
685 | unsigned long val; |
686 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
687 | struct etm_config *config = &drvdata->config; |
688 | |
689 | spin_lock(lock: &drvdata->spinlock); |
690 | val = config->cntr_rld_event[config->cntr_idx]; |
691 | spin_unlock(lock: &drvdata->spinlock); |
692 | |
693 | return sprintf(buf, fmt: "%#lx\n" , val); |
694 | } |
695 | |
696 | static ssize_t cntr_rld_event_store(struct device *dev, |
697 | struct device_attribute *attr, |
698 | const char *buf, size_t size) |
699 | { |
700 | int ret; |
701 | unsigned long val; |
702 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
703 | struct etm_config *config = &drvdata->config; |
704 | |
705 | ret = kstrtoul(s: buf, base: 16, res: &val); |
706 | if (ret) |
707 | return ret; |
708 | |
709 | spin_lock(lock: &drvdata->spinlock); |
710 | config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK; |
711 | spin_unlock(lock: &drvdata->spinlock); |
712 | |
713 | return size; |
714 | } |
715 | static DEVICE_ATTR_RW(cntr_rld_event); |
716 | |
717 | static ssize_t cntr_val_show(struct device *dev, |
718 | struct device_attribute *attr, char *buf) |
719 | { |
720 | int i, ret = 0; |
721 | u32 val; |
722 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
723 | struct etm_config *config = &drvdata->config; |
724 | |
725 | if (!coresight_get_mode(csdev: drvdata->csdev)) { |
726 | spin_lock(lock: &drvdata->spinlock); |
727 | for (i = 0; i < drvdata->nr_cntr; i++) |
728 | ret += sprintf(buf, fmt: "counter %d: %x\n" , |
729 | i, config->cntr_val[i]); |
730 | spin_unlock(lock: &drvdata->spinlock); |
731 | return ret; |
732 | } |
733 | |
734 | for (i = 0; i < drvdata->nr_cntr; i++) { |
735 | val = etm_readl(drvdata, ETMCNTVRn(i)); |
736 | ret += sprintf(buf, fmt: "counter %d: %x\n" , i, val); |
737 | } |
738 | |
739 | return ret; |
740 | } |
741 | |
742 | static ssize_t cntr_val_store(struct device *dev, |
743 | struct device_attribute *attr, |
744 | const char *buf, size_t size) |
745 | { |
746 | int ret; |
747 | unsigned long val; |
748 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
749 | struct etm_config *config = &drvdata->config; |
750 | |
751 | ret = kstrtoul(s: buf, base: 16, res: &val); |
752 | if (ret) |
753 | return ret; |
754 | |
755 | spin_lock(lock: &drvdata->spinlock); |
756 | config->cntr_val[config->cntr_idx] = val; |
757 | spin_unlock(lock: &drvdata->spinlock); |
758 | |
759 | return size; |
760 | } |
761 | static DEVICE_ATTR_RW(cntr_val); |
762 | |
763 | static ssize_t seq_12_event_show(struct device *dev, |
764 | struct device_attribute *attr, char *buf) |
765 | { |
766 | unsigned long val; |
767 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
768 | struct etm_config *config = &drvdata->config; |
769 | |
770 | val = config->seq_12_event; |
771 | return sprintf(buf, fmt: "%#lx\n" , val); |
772 | } |
773 | |
774 | static ssize_t seq_12_event_store(struct device *dev, |
775 | struct device_attribute *attr, |
776 | const char *buf, size_t size) |
777 | { |
778 | int ret; |
779 | unsigned long val; |
780 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
781 | struct etm_config *config = &drvdata->config; |
782 | |
783 | ret = kstrtoul(s: buf, base: 16, res: &val); |
784 | if (ret) |
785 | return ret; |
786 | |
787 | config->seq_12_event = val & ETM_EVENT_MASK; |
788 | return size; |
789 | } |
790 | static DEVICE_ATTR_RW(seq_12_event); |
791 | |
792 | static ssize_t seq_21_event_show(struct device *dev, |
793 | struct device_attribute *attr, char *buf) |
794 | { |
795 | unsigned long val; |
796 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
797 | struct etm_config *config = &drvdata->config; |
798 | |
799 | val = config->seq_21_event; |
800 | return sprintf(buf, fmt: "%#lx\n" , val); |
801 | } |
802 | |
803 | static ssize_t seq_21_event_store(struct device *dev, |
804 | struct device_attribute *attr, |
805 | const char *buf, size_t size) |
806 | { |
807 | int ret; |
808 | unsigned long val; |
809 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
810 | struct etm_config *config = &drvdata->config; |
811 | |
812 | ret = kstrtoul(s: buf, base: 16, res: &val); |
813 | if (ret) |
814 | return ret; |
815 | |
816 | config->seq_21_event = val & ETM_EVENT_MASK; |
817 | return size; |
818 | } |
819 | static DEVICE_ATTR_RW(seq_21_event); |
820 | |
821 | static ssize_t seq_23_event_show(struct device *dev, |
822 | struct device_attribute *attr, char *buf) |
823 | { |
824 | unsigned long val; |
825 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
826 | struct etm_config *config = &drvdata->config; |
827 | |
828 | val = config->seq_23_event; |
829 | return sprintf(buf, fmt: "%#lx\n" , val); |
830 | } |
831 | |
832 | static ssize_t seq_23_event_store(struct device *dev, |
833 | struct device_attribute *attr, |
834 | const char *buf, size_t size) |
835 | { |
836 | int ret; |
837 | unsigned long val; |
838 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
839 | struct etm_config *config = &drvdata->config; |
840 | |
841 | ret = kstrtoul(s: buf, base: 16, res: &val); |
842 | if (ret) |
843 | return ret; |
844 | |
845 | config->seq_23_event = val & ETM_EVENT_MASK; |
846 | return size; |
847 | } |
848 | static DEVICE_ATTR_RW(seq_23_event); |
849 | |
850 | static ssize_t seq_31_event_show(struct device *dev, |
851 | struct device_attribute *attr, char *buf) |
852 | { |
853 | unsigned long val; |
854 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
855 | struct etm_config *config = &drvdata->config; |
856 | |
857 | val = config->seq_31_event; |
858 | return sprintf(buf, fmt: "%#lx\n" , val); |
859 | } |
860 | |
861 | static ssize_t seq_31_event_store(struct device *dev, |
862 | struct device_attribute *attr, |
863 | const char *buf, size_t size) |
864 | { |
865 | int ret; |
866 | unsigned long val; |
867 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
868 | struct etm_config *config = &drvdata->config; |
869 | |
870 | ret = kstrtoul(s: buf, base: 16, res: &val); |
871 | if (ret) |
872 | return ret; |
873 | |
874 | config->seq_31_event = val & ETM_EVENT_MASK; |
875 | return size; |
876 | } |
877 | static DEVICE_ATTR_RW(seq_31_event); |
878 | |
879 | static ssize_t seq_32_event_show(struct device *dev, |
880 | struct device_attribute *attr, char *buf) |
881 | { |
882 | unsigned long val; |
883 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
884 | struct etm_config *config = &drvdata->config; |
885 | |
886 | val = config->seq_32_event; |
887 | return sprintf(buf, fmt: "%#lx\n" , val); |
888 | } |
889 | |
890 | static ssize_t seq_32_event_store(struct device *dev, |
891 | struct device_attribute *attr, |
892 | const char *buf, size_t size) |
893 | { |
894 | int ret; |
895 | unsigned long val; |
896 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
897 | struct etm_config *config = &drvdata->config; |
898 | |
899 | ret = kstrtoul(s: buf, base: 16, res: &val); |
900 | if (ret) |
901 | return ret; |
902 | |
903 | config->seq_32_event = val & ETM_EVENT_MASK; |
904 | return size; |
905 | } |
906 | static DEVICE_ATTR_RW(seq_32_event); |
907 | |
908 | static ssize_t seq_13_event_show(struct device *dev, |
909 | struct device_attribute *attr, char *buf) |
910 | { |
911 | unsigned long val; |
912 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
913 | struct etm_config *config = &drvdata->config; |
914 | |
915 | val = config->seq_13_event; |
916 | return sprintf(buf, fmt: "%#lx\n" , val); |
917 | } |
918 | |
919 | static ssize_t seq_13_event_store(struct device *dev, |
920 | struct device_attribute *attr, |
921 | const char *buf, size_t size) |
922 | { |
923 | int ret; |
924 | unsigned long val; |
925 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
926 | struct etm_config *config = &drvdata->config; |
927 | |
928 | ret = kstrtoul(s: buf, base: 16, res: &val); |
929 | if (ret) |
930 | return ret; |
931 | |
932 | config->seq_13_event = val & ETM_EVENT_MASK; |
933 | return size; |
934 | } |
935 | static DEVICE_ATTR_RW(seq_13_event); |
936 | |
937 | static ssize_t seq_curr_state_show(struct device *dev, |
938 | struct device_attribute *attr, char *buf) |
939 | { |
940 | unsigned long val, flags; |
941 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
942 | struct etm_config *config = &drvdata->config; |
943 | |
944 | if (!coresight_get_mode(csdev: drvdata->csdev)) { |
945 | val = config->seq_curr_state; |
946 | goto out; |
947 | } |
948 | |
949 | pm_runtime_get_sync(dev: dev->parent); |
950 | spin_lock_irqsave(&drvdata->spinlock, flags); |
951 | |
952 | CS_UNLOCK(addr: drvdata->base); |
953 | val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); |
954 | CS_LOCK(addr: drvdata->base); |
955 | |
956 | spin_unlock_irqrestore(lock: &drvdata->spinlock, flags); |
957 | pm_runtime_put(dev: dev->parent); |
958 | out: |
959 | return sprintf(buf, fmt: "%#lx\n" , val); |
960 | } |
961 | |
962 | static ssize_t seq_curr_state_store(struct device *dev, |
963 | struct device_attribute *attr, |
964 | const char *buf, size_t size) |
965 | { |
966 | int ret; |
967 | unsigned long val; |
968 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
969 | struct etm_config *config = &drvdata->config; |
970 | |
971 | ret = kstrtoul(s: buf, base: 16, res: &val); |
972 | if (ret) |
973 | return ret; |
974 | |
975 | if (val > ETM_SEQ_STATE_MAX_VAL) |
976 | return -EINVAL; |
977 | |
978 | config->seq_curr_state = val; |
979 | |
980 | return size; |
981 | } |
982 | static DEVICE_ATTR_RW(seq_curr_state); |
983 | |
984 | static ssize_t ctxid_idx_show(struct device *dev, |
985 | struct device_attribute *attr, char *buf) |
986 | { |
987 | unsigned long val; |
988 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
989 | struct etm_config *config = &drvdata->config; |
990 | |
991 | val = config->ctxid_idx; |
992 | return sprintf(buf, fmt: "%#lx\n" , val); |
993 | } |
994 | |
995 | static ssize_t ctxid_idx_store(struct device *dev, |
996 | struct device_attribute *attr, |
997 | const char *buf, size_t size) |
998 | { |
999 | int ret; |
1000 | unsigned long val; |
1001 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1002 | struct etm_config *config = &drvdata->config; |
1003 | |
1004 | ret = kstrtoul(s: buf, base: 16, res: &val); |
1005 | if (ret) |
1006 | return ret; |
1007 | |
1008 | if (val >= drvdata->nr_ctxid_cmp) |
1009 | return -EINVAL; |
1010 | |
1011 | /* |
1012 | * Use spinlock to ensure index doesn't change while it gets |
1013 | * dereferenced multiple times within a spinlock block elsewhere. |
1014 | */ |
1015 | spin_lock(lock: &drvdata->spinlock); |
1016 | config->ctxid_idx = val; |
1017 | spin_unlock(lock: &drvdata->spinlock); |
1018 | |
1019 | return size; |
1020 | } |
1021 | static DEVICE_ATTR_RW(ctxid_idx); |
1022 | |
1023 | static ssize_t ctxid_pid_show(struct device *dev, |
1024 | struct device_attribute *attr, char *buf) |
1025 | { |
1026 | unsigned long val; |
1027 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1028 | struct etm_config *config = &drvdata->config; |
1029 | |
1030 | /* |
1031 | * Don't use contextID tracing if coming from a PID namespace. See |
1032 | * comment in ctxid_pid_store(). |
1033 | */ |
1034 | if (task_active_pid_ns(current) != &init_pid_ns) |
1035 | return -EINVAL; |
1036 | |
1037 | spin_lock(lock: &drvdata->spinlock); |
1038 | val = config->ctxid_pid[config->ctxid_idx]; |
1039 | spin_unlock(lock: &drvdata->spinlock); |
1040 | |
1041 | return sprintf(buf, fmt: "%#lx\n" , val); |
1042 | } |
1043 | |
1044 | static ssize_t ctxid_pid_store(struct device *dev, |
1045 | struct device_attribute *attr, |
1046 | const char *buf, size_t size) |
1047 | { |
1048 | int ret; |
1049 | unsigned long pid; |
1050 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1051 | struct etm_config *config = &drvdata->config; |
1052 | |
1053 | /* |
1054 | * When contextID tracing is enabled the tracers will insert the |
1055 | * value found in the contextID register in the trace stream. But if |
1056 | * a process is in a namespace the PID of that process as seen from the |
1057 | * namespace won't be what the kernel sees, something that makes the |
1058 | * feature confusing and can potentially leak kernel only information. |
1059 | * As such refuse to use the feature if @current is not in the initial |
1060 | * PID namespace. |
1061 | */ |
1062 | if (task_active_pid_ns(current) != &init_pid_ns) |
1063 | return -EINVAL; |
1064 | |
1065 | ret = kstrtoul(s: buf, base: 16, res: &pid); |
1066 | if (ret) |
1067 | return ret; |
1068 | |
1069 | spin_lock(lock: &drvdata->spinlock); |
1070 | config->ctxid_pid[config->ctxid_idx] = pid; |
1071 | spin_unlock(lock: &drvdata->spinlock); |
1072 | |
1073 | return size; |
1074 | } |
1075 | static DEVICE_ATTR_RW(ctxid_pid); |
1076 | |
1077 | static ssize_t ctxid_mask_show(struct device *dev, |
1078 | struct device_attribute *attr, char *buf) |
1079 | { |
1080 | unsigned long val; |
1081 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1082 | struct etm_config *config = &drvdata->config; |
1083 | |
1084 | /* |
1085 | * Don't use contextID tracing if coming from a PID namespace. See |
1086 | * comment in ctxid_pid_store(). |
1087 | */ |
1088 | if (task_active_pid_ns(current) != &init_pid_ns) |
1089 | return -EINVAL; |
1090 | |
1091 | val = config->ctxid_mask; |
1092 | return sprintf(buf, fmt: "%#lx\n" , val); |
1093 | } |
1094 | |
1095 | static ssize_t ctxid_mask_store(struct device *dev, |
1096 | struct device_attribute *attr, |
1097 | const char *buf, size_t size) |
1098 | { |
1099 | int ret; |
1100 | unsigned long val; |
1101 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1102 | struct etm_config *config = &drvdata->config; |
1103 | |
1104 | /* |
1105 | * Don't use contextID tracing if coming from a PID namespace. See |
1106 | * comment in ctxid_pid_store(). |
1107 | */ |
1108 | if (task_active_pid_ns(current) != &init_pid_ns) |
1109 | return -EINVAL; |
1110 | |
1111 | ret = kstrtoul(s: buf, base: 16, res: &val); |
1112 | if (ret) |
1113 | return ret; |
1114 | |
1115 | config->ctxid_mask = val; |
1116 | return size; |
1117 | } |
1118 | static DEVICE_ATTR_RW(ctxid_mask); |
1119 | |
1120 | static ssize_t sync_freq_show(struct device *dev, |
1121 | struct device_attribute *attr, char *buf) |
1122 | { |
1123 | unsigned long val; |
1124 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1125 | struct etm_config *config = &drvdata->config; |
1126 | |
1127 | val = config->sync_freq; |
1128 | return sprintf(buf, fmt: "%#lx\n" , val); |
1129 | } |
1130 | |
1131 | static ssize_t sync_freq_store(struct device *dev, |
1132 | struct device_attribute *attr, |
1133 | const char *buf, size_t size) |
1134 | { |
1135 | int ret; |
1136 | unsigned long val; |
1137 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1138 | struct etm_config *config = &drvdata->config; |
1139 | |
1140 | ret = kstrtoul(s: buf, base: 16, res: &val); |
1141 | if (ret) |
1142 | return ret; |
1143 | |
1144 | config->sync_freq = val & ETM_SYNC_MASK; |
1145 | return size; |
1146 | } |
1147 | static DEVICE_ATTR_RW(sync_freq); |
1148 | |
1149 | static ssize_t timestamp_event_show(struct device *dev, |
1150 | struct device_attribute *attr, char *buf) |
1151 | { |
1152 | unsigned long val; |
1153 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1154 | struct etm_config *config = &drvdata->config; |
1155 | |
1156 | val = config->timestamp_event; |
1157 | return sprintf(buf, fmt: "%#lx\n" , val); |
1158 | } |
1159 | |
1160 | static ssize_t timestamp_event_store(struct device *dev, |
1161 | struct device_attribute *attr, |
1162 | const char *buf, size_t size) |
1163 | { |
1164 | int ret; |
1165 | unsigned long val; |
1166 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1167 | struct etm_config *config = &drvdata->config; |
1168 | |
1169 | ret = kstrtoul(s: buf, base: 16, res: &val); |
1170 | if (ret) |
1171 | return ret; |
1172 | |
1173 | config->timestamp_event = val & ETM_EVENT_MASK; |
1174 | return size; |
1175 | } |
1176 | static DEVICE_ATTR_RW(timestamp_event); |
1177 | |
1178 | static ssize_t cpu_show(struct device *dev, |
1179 | struct device_attribute *attr, char *buf) |
1180 | { |
1181 | int val; |
1182 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1183 | |
1184 | val = drvdata->cpu; |
1185 | return scnprintf(buf, PAGE_SIZE, fmt: "%d\n" , val); |
1186 | |
1187 | } |
1188 | static DEVICE_ATTR_RO(cpu); |
1189 | |
1190 | static ssize_t traceid_show(struct device *dev, |
1191 | struct device_attribute *attr, char *buf) |
1192 | { |
1193 | int trace_id; |
1194 | struct etm_drvdata *drvdata = dev_get_drvdata(dev: dev->parent); |
1195 | |
1196 | trace_id = etm_read_alloc_trace_id(drvdata); |
1197 | if (trace_id < 0) |
1198 | return trace_id; |
1199 | |
1200 | return sysfs_emit(buf, fmt: "%#x\n" , trace_id); |
1201 | } |
1202 | static DEVICE_ATTR_RO(traceid); |
1203 | |
/* All user-configurable ETM attributes, exposed at the sysfs top level. */
static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	&dev_attr_cpu.attr,
	NULL,
};
1241 | |
/*
 * Read-only dumps of management registers, exposed under the "mgmt"
 * subdirectory via coresight_etm_mgmt_group below.
 */
static struct attribute *coresight_etm_mgmt_attrs[] = {
	coresight_simple_reg32(etmccr, ETMCCR),
	coresight_simple_reg32(etmccer, ETMCCER),
	coresight_simple_reg32(etmscr, ETMSCR),
	coresight_simple_reg32(etmidr, ETMIDR),
	coresight_simple_reg32(etmcr, ETMCR),
	coresight_simple_reg32(etmtraceidr, ETMTRACEIDR),
	coresight_simple_reg32(etmteevr, ETMTEEVR),
	/* NOTE(review): attr name "etmtssvr" vs register ETMTSSCR — confirm intended */
	coresight_simple_reg32(etmtssvr, ETMTSSCR),
	coresight_simple_reg32(etmtecr1, ETMTECR1),
	coresight_simple_reg32(etmtecr2, ETMTECR2),
	NULL,
};
1255 | |
/* Top-level (unnamed) sysfs group holding the configuration attributes. */
static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};
1259 | |
/* Named group: management register dumps appear under <device>/mgmt/. */
static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt" ,
};
1264 | |
/* NULL-terminated group list handed to the coresight core at registration. */
const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};
1270 | |