// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CP_SHIFT		16
#define CNTL_CP_MASK		(0xFF << CNTL_CP_SHIFT)
#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
#define CYCLES_COUNTER_MASK	0x0FFFFFFF
#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER		0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED	0x3	/* support enhanced AXI ID filter */
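
/*
 * Note that DDR_CAP_AXI_ID_FILTER_ENHANCED (0x3) includes the
 * DDR_CAP_AXI_ID_FILTER bit (0x1): parts with the enhanced filter also
 * support plain AXI ID filtering. ddr_perf_filter_cap_get() relies on
 * this when it masks the quirks field.
 */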

struct fsl_ddr_devtype_data {
	unsigned int quirks;	/* quirks needed for different DDR Perf core */
	const char *identifier;	/* system PMU identifier for userspace */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MQ",
};

static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MM",
};

static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MN",
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
	.identifier = "i.MX8MP",
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data },
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data },
	{ .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data },
	{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data },
	{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data },
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
	int active_counter;
};
static ssize_t ddr_perf_identifier_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}

static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	if (!pmu->devtype_data->identifier)
		return 0;
	return attr->mode;
}

static struct device_attribute ddr_perf_identifier_attr =
	__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);

static struct attribute *ddr_perf_identifier_attrs[] = {
	&ddr_perf_identifier_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_identifier_attr_group = {
	.attrs = ddr_perf_identifier_attrs,
	.is_visible = ddr_perf_identifier_attr_visible,
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	NULL,
};

static const struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};
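
/*
 * The capability flags above surface through the PMU's sysfs "caps"
 * directory. With this device registered as, e.g., imx8_ddr0, the
 * paths would be (illustrative; the name depends on the allocated id):
 *
 *   /sys/bus/event_source/devices/imx8_ddr0/caps/filter
 *   /sys/bus/event_source/devices/imx8_ddr0/caps/enhanced_filter
 */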

static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};
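
/*
 * Illustrative usage from the perf tool (the imx8_ddr0 instance name
 * and the AXI ID value are examples, not fixed):
 *
 *   perf stat -a -e imx8_ddr0/read-cycles/,imx8_ddr0/write-cycles/ sleep 1
 *
 * On parts with AXI ID filtering, the axid-read/axid-write events take
 * a filter in config1, split into the axi_id and axi_mask fields
 * declared above:
 *
 *   perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ sleep 1
 */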

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	&ddr_perf_identifier_attr_group,
	NULL,
};

static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}
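
/*
 * config1 carries the whole filter value: axi_id in bits [15:0] and
 * axi_mask in bits [31:16], mirroring the format attributes above.
 * Filtered events share the single DPCR1 register, so two of them can
 * coexist only with identical filter values, which is what
 * ddr_perf_filters_compatible() checks.
 */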

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: the cycle counter is
	 * dedicated to the cycle event and can't be used for any other
	 * event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts from the DDR transaction for
	 * the axid-read and axid-write events if the PMU core supports
	 * the enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although
	 * software events are acceptable (for example to create a CCN
	 * group periodically read when a hrtimer aka cpu-clock leader
	 * triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: it must first see a 0
		 * and then a 1 written to the CLEAR bit to be cleared.
		 * The other counters only need a 0 written to the CLEAR
		 * bit; the hardware sets it back to 1 on its own. The
		 * enable sequence below is therefore harmless for all
		 * counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);

		/*
		 * On i.MX8MP we need to bias the cycle counter to overflow more often.
		 * We do this by initializing bits [23:16] of the counter value via the
		 * COUNTER_CTRL Counter Parameter (CP) field.
		 */
		if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
			if (counter == EVENT_CYCLES_COUNTER)
				val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
		}

		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}

static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
{
	int val;

	val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);

	return val & CNTL_OVER;
}

static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	val = readl_relaxed(pmu->base + reg);
	val &= ~CNTL_CLEAR;
	writel(val, pmu->base + reg);

	val |= CNTL_CLEAR;
	writel(val, pmu->base + reg);
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 new_raw_count;
	int counter = hwc->idx;
	int ret;

	new_raw_count = ddr_perf_read_counter(pmu, counter);
	/* Remove the bias applied in ddr_perf_counter_enable(). */
	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
		if (counter == EVENT_CYCLES_COUNTER)
			new_raw_count &= CYCLES_COUNTER_MASK;
	}

	local64_add(new_raw_count, &event->count);

	/*
	 * On legacy SoCs an event counter keeps counting after an
	 * overflow, so there is no need to clear it. On newer SoCs an
	 * event counter stops on overflow and must be cleared before it
	 * counts again.
	 */
	if (counter != EVENT_CYCLES_COUNTER) {
		ret = ddr_perf_counter_overflow(pmu, counter);
		if (ret)
			dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n",
					     event->attr.config);
	}

	/* clear counter every time for both cycle counter and event counter */
	ddr_perf_counter_clear(pmu, counter);
}

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	/* The cycle counter gates all counters; enable it with the first user. */
	if (!pmu->active_counter++)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert the AXI ID masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	if (!--pmu->active_counter)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER, false);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
}
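
/*
 * The pmu_enable/pmu_disable callbacks above are intentionally empty:
 * counters are started and stopped individually in the add/start and
 * stop/del callbacks, with counter 0 (cycles) gating the rest, so
 * there is no global enable/disable work to do here.
 */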

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.module	      = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr  = perf_invalid_context,
			.attr_groups  = attr_groups,
			.event_init   = ddr_perf_event_init,
			.add	      = ddr_perf_event_add,
			.del	      = ddr_perf_event_del,
			.start	      = ddr_perf_event_start,
			.stop	      = ddr_perf_event_stop,
			.read	      = ddr_perf_event_update,
			.pmu_enable   = ddr_perf_pmu_enable,
			.pmu_disable  = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *)p;
	struct perf_event *event;

	/* all counters stop while the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				false);
	/*
	 * When the cycle counter overflows, all counters are stopped
	 * and an IRQ is raised. If any other counter overflows, it
	 * keeps counting and no IRQ is raised. On newer SoCs such as
	 * i.MX8MP, however, an event counter stops when it overflows,
	 * so we use the cycle counter's overflow to service the event
	 * counters before they can overflow.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);
	}

	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				true);

	return IRQ_HANDLED;
}

static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);
	if (num < 0)
		return num;

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name) {
		ret = -ENOMEM;
		goto cpuhp_state_err;
	}

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_free(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);

	perf_pmu_unregister(&pmu->pmu);

	ida_free(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name                = "imx-ddr-pmu",
		.of_match_table      = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");