// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Amlogic, Inc. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <soc/amlogic/meson_ddr_pmu.h>

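/*
 * Driver-private PMU state. The SoC-specific hardware description and
 * callbacks live in @info, @counters accumulates the one-shot hardware
 * counts between reads, and the PMU is bound to a single CPU (@cpu)
 * for counting and interrupt handling.
 */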
struct ddr_pmu {
	struct pmu pmu;
	struct dmc_info info;
	struct dmc_counter counters;	/* save counters from hw */
	bool pmu_enabled;
	struct device *dev;
	char *name;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
	int cpu;			/* for cpu hotplug */
};

#define DDR_PERF_DEV_NAME "meson_ddr_bw"
#define MAX_AXI_PORTS_OF_CHANNEL	4	/* A DMC channel can monitor max 4 axi ports */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
#define dmc_info_to_pmu(p)	container_of(p, struct ddr_pmu, info)

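/*
 * Gate the DMC bandwidth counters through the SoC-specific hw_info
 * callbacks. pmu_enabled tracks the software view so the hardware is
 * only touched on an actual state change.
 */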
static void dmc_pmu_enable(struct ddr_pmu *pmu)
{
	if (!pmu->pmu_enabled)
		pmu->info.hw_info->enable(&pmu->info);

	pmu->pmu_enabled = true;
}

static void dmc_pmu_disable(struct ddr_pmu *pmu)
{
	if (pmu->pmu_enabled)
		pmu->info.hw_info->disable(&pmu->info);

	pmu->pmu_enabled = false;
}

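/*
 * Program the AXI-port filter of the channel counter selected by the
 * event config. Only the per-channel events (CHAN1..CHANn) take a
 * filter; the all-channel counter is left untouched.
 */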
static void meson_ddr_set_axi_filter(struct perf_event *event, u8 axi_id)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	int chann;

	if (event->attr.config > ALL_CHAN_COUNTER_ID &&
	    event->attr.config < COUNTER_MAX_ID) {
		chann = event->attr.config - CHAN1_COUNTER_ID;

		pmu->info.hw_info->set_axi_filter(&pmu->info, axi_id, chann);
	}
}

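/* Sum two counter snapshots field by field into @sum. */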
static void ddr_cnt_addition(struct dmc_counter *sum,
			     struct dmc_counter *add1,
			     struct dmc_counter *add2,
			     int chann_nr)
{
	int i;
	u64 cnt1, cnt2;

	sum->all_cnt = add1->all_cnt + add2->all_cnt;
	sum->all_req = add1->all_req + add2->all_req;
	for (i = 0; i < chann_nr; i++) {
		cnt1 = add1->channel_cnt[i];
		cnt2 = add2->channel_cnt[i];

		sum->channel_cnt[i] = cnt1 + cnt2;
	}
}

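/*
 * Publish the event count: add the counts still pending in the
 * hardware registers to the totals accumulated by the IRQ handler,
 * then pick the field that matches the event's config.
 */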
static void meson_ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	u64 new_raw_count = 0;
	struct dmc_counter dc = {0}, sum_dc = {0};
	int idx;
	int chann_nr = pmu->info.hw_info->chann_nr;

	/* get the remaining counts still held in the registers. */
	pmu->info.hw_info->get_counters(&pmu->info, &dc);

	ddr_cnt_addition(&sum_dc, &pmu->counters, &dc, chann_nr);

	switch (event->attr.config) {
	case ALL_CHAN_COUNTER_ID:
		new_raw_count = sum_dc.all_cnt;
		break;
	case CHAN1_COUNTER_ID:
	case CHAN2_COUNTER_ID:
	case CHAN3_COUNTER_ID:
	case CHAN4_COUNTER_ID:
	case CHAN5_COUNTER_ID:
	case CHAN6_COUNTER_ID:
	case CHAN7_COUNTER_ID:
	case CHAN8_COUNTER_ID:
		idx = event->attr.config - CHAN1_COUNTER_ID;
		new_raw_count = sum_dc.channel_cnt[idx];
		break;
	}

	local64_set(&event->count, new_raw_count);
}

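/*
 * Validate a new event. This is an uncore-style PMU, so sampling and
 * per-task counting are rejected and the event is forced onto the CPU
 * the PMU is bound to. config1/config2 carry the AXI-port filter bits,
 * and one channel counter can only filter a limited number of ports.
 */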
static int meson_ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	u64 config1 = event->attr.config1;
	u64 config2 = event->attr.config2;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EOPNOTSUPP;

	/* reject the event if more AXI ports are selected than one channel can filter */
	if (event->attr.config != ALL_CHAN_COUNTER_ID &&
	    hweight64(config1) + hweight64(config2) > MAX_AXI_PORTS_OF_CHANNEL)
		return -EOPNOTSUPP;

	event->cpu = pmu->cpu;

	return 0;
}

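/*
 * Start counting from zero: clear the accumulated totals and enable
 * the hardware counters.
 */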
static void meson_ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	memset(&pmu->counters, 0, sizeof(pmu->counters));
	dmc_pmu_enable(pmu);
}

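/*
 * Each set bit in config1 selects an AXI port id 0..63 and each set
 * bit in config2 selects an AXI port id 64..127; the ids are handed
 * to the per-channel filter before counting starts.
 */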
static int meson_ddr_perf_event_add(struct perf_event *event, int flags)
{
	u64 config1 = event->attr.config1;
	u64 config2 = event->attr.config2;
	int i;

	for_each_set_bit(i,
			 (const unsigned long *)&config1,
			 BITS_PER_TYPE(config1))
		meson_ddr_set_axi_filter(event, i);

	for_each_set_bit(i,
			 (const unsigned long *)&config2,
			 BITS_PER_TYPE(config2))
		meson_ddr_set_axi_filter(event, i + 64);

	if (flags & PERF_EF_START)
		meson_ddr_perf_event_start(event, flags);

	return 0;
}

static void meson_ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	if (flags & PERF_EF_UPDATE)
		meson_ddr_perf_event_update(event);

	dmc_pmu_disable(pmu);
}

static void meson_ddr_perf_event_del(struct perf_event *event, int flags)
{
	meson_ddr_perf_event_stop(event, PERF_EF_UPDATE);
}

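/*
 * sysfs "cpumask" attribute: report the single CPU the PMU is bound
 * to, so the perf tool knows where to open the events.
 */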
static ssize_t meson_ddr_perf_cpumask_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute meson_ddr_perf_cpumask_attr =
__ATTR(cpumask, 0444, meson_ddr_perf_cpumask_show, NULL);

static struct attribute *meson_ddr_perf_cpumask_attrs[] = {
	&meson_ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = meson_ddr_perf_cpumask_attrs,
};

static ssize_t
pmu_event_show(struct device *dev, struct device_attribute *attr,
	       char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

static ssize_t
event_show_unit(struct device *dev, struct device_attribute *attr,
		char *page)
{
	return sysfs_emit(page, "MB\n");
}

static ssize_t
event_show_scale(struct device *dev, struct device_attribute *attr,
		 char *page)
{
	/* one count = 16 bytes = 1.52587890625e-05 MB */
	return sysfs_emit(page, "1.52587890625e-05\n");
}

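/*
 * Each event is exported to sysfs as a triple of attributes: the raw
 * event id, a ".unit" string and a ".scale" factor, so the perf tool
 * can turn the 16-byte counter ticks directly into megabytes
 * (16 / 2^20 = 1.52587890625e-05 MB per count).
 */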
#define AML_DDR_PMU_EVENT_ATTR(_name, _id)				\
{									\
	.attr = __ATTR(_name, 0444, pmu_event_show, NULL),		\
	.id = _id,							\
}

#define AML_DDR_PMU_EVENT_UNIT_ATTR(_name)				\
	__ATTR(_name.unit, 0444, event_show_unit, NULL)

#define AML_DDR_PMU_EVENT_SCALE_ATTR(_name)				\
	__ATTR(_name.scale, 0444, event_show_scale, NULL)

static struct device_attribute event_unit_attrs[] = {
	AML_DDR_PMU_EVENT_UNIT_ATTR(total_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_1_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_2_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_3_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_4_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_5_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_6_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_7_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_8_rw_bytes),
};

static struct device_attribute event_scale_attrs[] = {
	AML_DDR_PMU_EVENT_SCALE_ATTR(total_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_1_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_2_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_3_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_4_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_5_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_6_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_7_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_8_rw_bytes),
};

static struct perf_pmu_events_attr event_attrs[] = {
	AML_DDR_PMU_EVENT_ATTR(total_rw_bytes, ALL_CHAN_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_1_rw_bytes, CHAN1_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_2_rw_bytes, CHAN2_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_3_rw_bytes, CHAN3_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_4_rw_bytes, CHAN4_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_5_rw_bytes, CHAN5_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_6_rw_bytes, CHAN6_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_7_rw_bytes, CHAN7_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_8_rw_bytes, CHAN8_COUNTER_ID),
};

/* three attrs (event id, unit, scale) are combined into one event */
static struct attribute *ddr_perf_events_attrs[COUNTER_MAX_ID * 3];

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

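/*
 * Hide format fields the hardware cannot filter on: each format
 * attribute prints a field definition such as "config1:<bit>" or
 * "config2:<bit>", and the referenced bit is checked against the
 * SoC capability mask from hw_info.
 */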
static umode_t meson_ddr_perf_format_attr_visible(struct kobject *kobj,
						  struct attribute *attr,
						  int n)
{
	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
	const u64 *capability = ddr_pmu->info.hw_info->capability;
	struct device_attribute *dev_attr;
	int id;
	char value[20]; // config1:xxx, 20 is enough

	dev_attr = container_of(attr, struct device_attribute, attr);
	dev_attr->show(NULL, NULL, value);

	if (sscanf(value, "config1:%d", &id) == 1)
		return capability[0] & (1ULL << id) ? attr->mode : 0;

	if (sscanf(value, "config2:%d", &id) == 1)
		return capability[1] & (1ULL << id) ? attr->mode : 0;

	return attr->mode;
}

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.is_visible = meson_ddr_perf_format_attr_visible,
};

static ssize_t meson_ddr_perf_identifier_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%s\n", pmu->name);
}

static struct device_attribute meson_ddr_perf_identifier_attr =
__ATTR(identifier, 0444, meson_ddr_perf_identifier_show, NULL);

static struct attribute *meson_ddr_perf_identifier_attrs[] = {
	&meson_ddr_perf_identifier_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_identifier_attr_group = {
	.attrs = meson_ddr_perf_identifier_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_identifier_attr_group,
	NULL,
};

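/*
 * Interrupt raised when a counting window expires: fold the expired
 * hardware window into the running totals in pmu->counters and, if
 * counting is still enabled, start the next window.
 */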
static irqreturn_t dmc_irq_handler(int irq, void *dev_id)
{
	struct dmc_info *info = dev_id;
	struct ddr_pmu *pmu;
	struct dmc_counter counters, *sum_cnter;
	int i;

	pmu = dmc_info_to_pmu(info);

	if (info->hw_info->irq_handler(info, &counters) != 0)
		goto out;

	sum_cnter = &pmu->counters;
	sum_cnter->all_cnt += counters.all_cnt;
	sum_cnter->all_req += counters.all_req;

	for (i = 0; i < pmu->info.hw_info->chann_nr; i++)
		sum_cnter->channel_cnt[i] += counters.channel_cnt[i];

	if (pmu->pmu_enabled)
		/*
		 * The timer interrupt only supports one-shot mode, so
		 * re-enable it in the ISR to keep counting continuously.
		 */
		info->hw_info->enable(info);

	dev_dbg(pmu->dev, "counts: %llu %llu %llu, %llu, %llu, %llu\t\t"
			"sum: %llu %llu %llu, %llu, %llu, %llu\n",
			counters.all_req,
			counters.all_cnt,
			counters.channel_cnt[0],
			counters.channel_cnt[1],
			counters.channel_cnt[2],
			counters.channel_cnt[3],

			pmu->counters.all_req,
			pmu->counters.all_cnt,
			pmu->counters.channel_cnt[0],
			pmu->counters.channel_cnt[1],
			pmu->counters.channel_cnt[2],
			pmu->counters.channel_cnt[3]);
out:
	return IRQ_HANDLED;
}

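/*
 * CPU hotplug: if the CPU the PMU is bound to goes offline, migrate
 * the perf context and the counter IRQ affinity to another online CPU.
 */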
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity(pmu->info.irq_num, cpumask_of(pmu->cpu)));

	return 0;
}

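/*
 * Build the "events" attribute array at probe time: one
 * event/unit/scale triple for the all-channel counter, followed by
 * one triple per channel actually present on this SoC.
 */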
static void fill_event_attr(struct ddr_pmu *pmu)
{
	int i, j, k;
	struct attribute **dst = ddr_perf_events_attrs;

	j = 0;
	k = 0;

	/* fill ALL_CHAN_COUNTER_ID event */
	dst[j++] = &event_attrs[k].attr.attr;
	dst[j++] = &event_unit_attrs[k].attr;
	dst[j++] = &event_scale_attrs[k].attr;

	k++;

	/* fill each channel event */
	for (i = 0; i < pmu->info.hw_info->chann_nr; i++, k++) {
		dst[j++] = &event_attrs[k].attr.attr;
		dst[j++] = &event_unit_attrs[k].attr;
		dst[j++] = &event_scale_attrs[k].attr;
	}

	dst[j] = NULL; /* mark end */
}

static void fmt_attr_fill(struct attribute **fmt_attr)
{
	ddr_perf_format_attr_group.attrs = fmt_attr;
}

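/*
 * Probe-time resource setup: map one register region per DMC instance,
 * then the PLL region that follows them, and finally request the
 * counter interrupt with IRQF_NOBALANCING so its affinity can track
 * the PMU's CPU.
 */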
static int ddr_pmu_parse_dt(struct platform_device *pdev,
			    struct dmc_info *info)
{
	void __iomem *base;
	int i, ret;

	info->hw_info = of_device_get_match_data(&pdev->dev);

	for (i = 0; i < info->hw_info->dmc_nr; i++) {
		/* resource 0 for ddr register base */
		base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(base))
			return PTR_ERR(base);

		info->ddr_reg[i] = base;
	}

	/* resource i for pll register base */
	base = devm_platform_ioremap_resource(pdev, i);
	if (IS_ERR(base))
		return PTR_ERR(base);

	info->pll_reg = base;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;

	info->irq_num = ret;

	ret = devm_request_irq(&pdev->dev, info->irq_num, dmc_irq_handler,
			       IRQF_NOBALANCING, dev_name(&pdev->dev),
			       (void *)info);
	if (ret < 0)
		return ret;

	return 0;
}

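/*
 * Register the DDR bandwidth PMU. Once this succeeds, the device shows
 * up under /sys/bus/event_source/devices/meson_ddr_bw and can be used
 * from userspace, e.g. (a sketch; the available chan_N events and
 * filter fields depend on the SoC's hw_info):
 *
 *   perf stat -a -e meson_ddr_bw/total_rw_bytes/ sleep 1
 */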
int meson_ddr_pmu_create(struct platform_device *pdev)
{
	int ret;
	char *name;
	struct ddr_pmu *pmu;

	pmu = devm_kzalloc(&pdev->dev, sizeof(struct ddr_pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	*pmu = (struct ddr_pmu) {
		.pmu = {
			.module = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init = meson_ddr_perf_event_init,
			.add = meson_ddr_perf_event_add,
			.del = meson_ddr_perf_event_del,
			.start = meson_ddr_perf_event_start,
			.stop = meson_ddr_perf_event_stop,
			.read = meson_ddr_perf_event_update,
		},
	};

	ret = ddr_pmu_parse_dt(pdev, &pmu->info);
	if (ret < 0)
		return ret;

	fmt_attr_fill(pmu->info.hw_info->fmt_attr);

	pmu->cpu = smp_processor_id();

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME);
	if (!name)
		return -ENOMEM;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, name, NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0)
		return ret;

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret)
		goto cpuhp_instance_err;

	fill_event_attr(pmu);

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto pmu_register_err;

	pmu->name = name;
	pmu->dev = &pdev->dev;
	pmu->pmu_enabled = false;

	platform_set_drvdata(pdev, pmu);

	return 0;

pmu_register_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);

cpuhp_instance_err:
	cpuhp_remove_state(pmu->cpuhp_state);

	return ret;
}

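/*
 * Tear-down mirrors meson_ddr_pmu_create(): unregister the PMU, then
 * drop the hotplug instance and the dynamically allocated hotplug
 * state.
 */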
int meson_ddr_pmu_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&pmu->pmu);
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_state(pmu->cpuhp_state);

	return 0;
}