// SPDX-License-Identifier: GPL-2.0

/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
 * to 4K boundary. For example, the PMCG at 0xff88840000 is named
 * smmuv3_pmcg_ff88840
 *
 * Filtering by StreamID is done by specifying filtering parameters
 * with the event. The options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *   STREAMID[Y - 1] == 0.
 *   STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match from the corresponding bits of event StreamID.
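 *
 * For example, to span-match StreamIDs 0x40-0x43 (Y = 2), STREAMID is
 * programmed with 0x41: bits [31:2] hold the value to match (0x10),
 * bit 1 is 0 and bit 0 is 1.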
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 * filter_span=1,filter_stream_id=0x42/ -a netperf
 * Applies filter pattern 0x42 to transaction events, which means events
 * matching stream ids 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SMMU_PMCG_EVCNTR0               0x0
#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0              0x400
#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT        29
#define SMMU_PMCG_SMR0                  0xA00
#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0             0xC00
#define SMMU_PMCG_CNTENCLR0             0xC20
#define SMMU_PMCG_INTENSET0             0xC40
#define SMMU_PMCG_INTENCLR0             0xC60
#define SMMU_PMCG_OVSCLR0               0xC80
#define SMMU_PMCG_OVSSET0               0xCC0
#define SMMU_PMCG_CFGR                  0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
#define SMMU_PMCG_CFGR_MSI              BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
#define SMMU_PMCG_CFGR_SIZE             GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR             GENMASK(5, 0)
#define SMMU_PMCG_CR                    0xE04
#define SMMU_PMCG_CR_ENABLE             BIT(0)
#define SMMU_PMCG_IIDR                  0xE08
#define SMMU_PMCG_IIDR_PRODUCTID        GENMASK(31, 20)
#define SMMU_PMCG_IIDR_VARIANT          GENMASK(19, 16)
#define SMMU_PMCG_IIDR_REVISION         GENMASK(15, 12)
#define SMMU_PMCG_IIDR_IMPLEMENTER      GENMASK(11, 0)
#define SMMU_PMCG_CEID0                 0xE20
#define SMMU_PMCG_CEID1                 0xE28
#define SMMU_PMCG_IRQ_CTRL              0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
#define SMMU_PMCG_IRQ_CFG0              0xE58
#define SMMU_PMCG_IRQ_CFG1              0xE60
#define SMMU_PMCG_IRQ_CFG2              0xE64

/* IMP-DEF ID registers */
#define SMMU_PMCG_PIDR0                 0xFE0
#define SMMU_PMCG_PIDR0_PART_0          GENMASK(7, 0)
#define SMMU_PMCG_PIDR1                 0xFE4
#define SMMU_PMCG_PIDR1_DES_0           GENMASK(7, 4)
#define SMMU_PMCG_PIDR1_PART_1          GENMASK(3, 0)
#define SMMU_PMCG_PIDR2                 0xFE8
#define SMMU_PMCG_PIDR2_REVISION        GENMASK(7, 4)
#define SMMU_PMCG_PIDR2_DES_1           GENMASK(2, 0)
#define SMMU_PMCG_PIDR3                 0xFEC
#define SMMU_PMCG_PIDR3_REVAND          GENMASK(7, 4)
#define SMMU_PMCG_PIDR4                 0xFD0
#define SMMU_PMCG_PIDR4_DES_2           GENMASK(3, 0)

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK              GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE   0x1

#define SMMU_PMCG_DEFAULT_FILTER_SPAN   1
#define SMMU_PMCG_DEFAULT_FILTER_SID    GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS          64
#define SMMU_PMCG_ARCH_MAX_EVENTS       128

#define SMMU_PMCG_PA_SHIFT              12

#define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)

static int cpuhp_state_num;

struct smmu_pmu {
	struct hlist_node node;
	struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
	DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
	DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
	unsigned int irq;
	unsigned int on_cpu;
	struct pmu pmu;
	unsigned int num_counters;
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *reloc_base;
	u64 counter_mask;
	u32 options;
	u32 iidr;
	bool global_filter;
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)	\
	static inline u32 get_##_name(struct perf_event *event)	\
	{								\
		return FIELD_GET(GENMASK_ULL(_end, _start),		\
				 event->attr._config);			\
	}

SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
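
/*
 * Illustrative sketch only, not used by the driver: how a user-visible
 * perf_event_attr::config1 value is assembled from the filter fields
 * extracted above (stream ID in bits [31:0], span in bit 32, enable in
 * bit 33). The helper name is hypothetical.
 */
static inline u64 __maybe_unused
smmu_pmu_example_config1(u32 stream_id, bool span, bool enable)
{
	return FIELD_PREP(GENMASK_ULL(31, 0), stream_id) |
	       FIELD_PREP(GENMASK_ULL(32, 32), span) |
	       FIELD_PREP(GENMASK_ULL(33, 33), enable);
}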

static inline void smmu_pmu_enable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx);

static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
	unsigned int idx;

	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);

	smmu_pmu_enable(pmu);
}

static inline void smmu_pmu_disable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}

static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
	unsigned int idx;

	/*
	 * The global disable of the PMU sometimes fails to stop counting.
	 * Harden this by writing an invalid event type to each used counter
	 * to forcibly stop counting.
	 */
	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));

	smmu_pmu_disable(pmu);
}

static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
{
	if (smmu_pmu->counter_mask & BIT(32))
		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
	u64 value;

	if (smmu_pmu->counter_mask & BIT(32))
		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

	return value;
}

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
					      u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
					u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}

static void smmu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u64 delta, prev, now;
	u32 idx = hwc->idx;

	do {
		prev = local64_read(&hwc->prev_count);
		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/* handle overflow. */
	delta = now - prev;
	delta &= smmu_pmu->counter_mask;

	local64_add(delta, &event->count);
}
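
/*
 * Worked example of the masked delta above, assuming 32-bit counters
 * (counter_mask == 0xffffffff): prev = 0xfffffff0 and now = 0x10 give
 * now - prev = 0xffffffff00000020, which the mask reduces to a delta of
 * 0x20 across the wrap.
 */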

static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
				struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
		/*
		 * On platforms that require this quirk, if the counter starts
		 * at < half_counter value and wraps, the current logic of
		 * handling the overflow may not work. It is expected that
		 * those platforms implement the full 64 counter bits, so
		 * that such a possibility is remote (e.g. HiSilicon HIP08).
		 */
		new = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} else {
		/*
		 * We limit the max period to half the max counter value
		 * of the counter size, so that even in the case of extreme
		 * interrupt latency the counter will (hopefully) not wrap
		 * past its initial value.
		 */
		new = smmu_pmu->counter_mask >> 1;
		smmu_pmu_counter_set_value(smmu_pmu, idx, new);
	}

	local64_set(&hwc->prev_count, new);
}
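
/*
 * Worked example of the half-period start above, assuming 32-bit
 * counters: new = 0x7fffffff, so roughly 2^31 events elapse before the
 * overflow interrupt fires, and nearly 2^31 more before the counter
 * wraps past its start value.
 */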

static void smmu_pmu_set_event_filter(struct perf_event *event,
				      int idx, u32 span, u32 sid)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u32 evtyper;

	evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
}

static bool smmu_pmu_check_global_filter(struct perf_event *curr,
					 struct perf_event *new)
{
	if (get_filter_enable(new) != get_filter_enable(curr))
		return false;

	if (!get_filter_enable(new))
		return true;

	return get_filter_span(new) == get_filter_span(curr) &&
	       get_filter_stream_id(new) == get_filter_stream_id(curr);
}

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx)
{
	u32 span, sid;
	unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
	bool filter_en = !!get_filter_enable(event);

	span = filter_en ? get_filter_span(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SPAN;
	sid = filter_en ? get_filter_stream_id(event) :
			  SMMU_PMCG_DEFAULT_FILTER_SID;

	cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
	/*
	 * Per-counter filtering, or scheduling the first globally-filtered
	 * event into an empty PMU so idx == 0 and it works out equivalent.
	 */
	if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
		smmu_pmu_set_event_filter(event, idx, span, sid);
		return 0;
	}

	/* Otherwise, must match whatever's currently scheduled */
	if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
		smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
		return 0;
	}

	return -EAGAIN;
}

static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
				  struct perf_event *event)
{
	int idx, err;
	unsigned int num_ctrs = smmu_pmu->num_counters;

	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
	if (err)
		return err;

	set_bit(idx, smmu_pmu->used_counters);

	return idx;
}

static bool smmu_pmu_events_compatible(struct perf_event *curr,
				       struct perf_event *new)
{
	if (new->pmu != curr->pmu)
		return false;

	if (to_smmu_pmu(new->pmu)->global_filter &&
	    !smmu_pmu_check_global_filter(curr, new))
		return false;

	return true;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct device *dev = smmu_pmu->dev;
	struct perf_event *sibling;
	int group_num_events = 1;
	u16 event_id;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (hwc->sample_period) {
		dev_dbg(dev, "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg(dev, "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	/* Verify specified event is supported on this PMU */
	event_id = get_event(event);
	if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
	    (!test_bit(event_id, smmu_pmu->supported_events))) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (!is_software_event(event->group_leader)) {
		if (!smmu_pmu_events_compatible(event->group_leader, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (!smmu_pmu_events_compatible(sibling, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	hwc->idx = -1;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = smmu_pmu->on_cpu;

	return 0;
}

static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	hwc->state = 0;

	smmu_pmu_set_period(smmu_pmu, hwc);

	smmu_pmu_counter_enable(smmu_pmu, idx);
}

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	smmu_pmu->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	smmu_pmu_interrupt_enable(smmu_pmu, idx);

	if (flags & PERF_EF_START)
		smmu_pmu_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	int idx = hwc->idx;

	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	smmu_pmu_interrupt_disable(smmu_pmu, idx);
	smmu_pmu->events[idx] = NULL;
	clear_bit(idx, smmu_pmu->used_counters);

	perf_event_update_userpage(event);
}

static void smmu_pmu_event_read(struct perf_event *event)
{
	smmu_pmu_event_update(event);
}

/* cpumask */

static ssize_t smmu_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}

static struct device_attribute smmu_pmu_cpumask_attr =
	__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
	&smmu_pmu_cpumask_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_cpumask_group = {
	.attrs = smmu_pmu_cpumask_attrs,
};

/* Events */

static ssize_t smmu_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define SMMU_EVENT_ATTR(name, config)				\
	PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)

static struct attribute *smmu_pmu_events[] = {
	SMMU_EVENT_ATTR(cycles, 0),
	SMMU_EVENT_ATTR(transaction, 1),
	SMMU_EVENT_ATTR(tlb_miss, 2),
	SMMU_EVENT_ATTR(config_cache_miss, 3),
	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
	SMMU_EVENT_ATTR(config_struct_access, 5),
	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
	NULL
};

static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
					 struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
		return attr->mode;

	return 0;
}

static const struct attribute_group smmu_pmu_events_group = {
	.name = "events",
	.attrs = smmu_pmu_events,
	.is_visible = smmu_pmu_event_is_visible,
};

static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
					     struct device_attribute *attr,
					     char *page)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
}

static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	if (!smmu_pmu->iidr)
		return 0;
	return attr->mode;
}

static struct device_attribute smmu_pmu_identifier_attr =
	__ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);

static struct attribute *smmu_pmu_identifier_attrs[] = {
	&smmu_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_identifier_group = {
	.attrs = smmu_pmu_identifier_attrs,
	.is_visible = smmu_pmu_identifier_attr_visible,
};

/* Formats */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span, "config1:32");
PMU_FORMAT_ATTR(filter_enable, "config1:33");

static struct attribute *smmu_pmu_formats[] = {
	&format_attr_event.attr,
	&format_attr_filter_stream_id.attr,
	&format_attr_filter_span.attr,
	&format_attr_filter_enable.attr,
	NULL
};

static const struct attribute_group smmu_pmu_format_group = {
	.name = "format",
	.attrs = smmu_pmu_formats,
};
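
/*
 * These format strings surface in sysfs, e.g. (illustrative path)
 * /sys/bus/event_source/devices/smmuv3_pmcg_<phys_addr_page>/format/,
 * which is how the perf tool learns to encode strings such as
 * "filter_enable=1" into config1.
 */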

static const struct attribute_group *smmu_pmu_attr_grps[] = {
	&smmu_pmu_cpumask_group,
	&smmu_pmu_events_group,
	&smmu_pmu_format_group,
	&smmu_pmu_identifier_group,
	NULL
};

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}

static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64));
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

	bitmap_from_u64(ovs, ovsr);
	for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	pmu->irq = msi_get_virq(dev, 0);

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);
	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counter and interrupt */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
		break;
	case IORT_SMMU_V3_PMCG_HISI_HIP09:
		smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu)
{
	return of_device_is_compatible(smmu_pmu->dev->of_node,
				       "arm,mmu-600-pmcg");
}

static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu)
{
	u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);

	if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) {
		u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0);
		u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1);
		u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2);
		u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3);
		u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4);

		u32 productid = FIELD_GET(SMMU_PMCG_PIDR0_PART_0, pidr0) |
				(FIELD_GET(SMMU_PMCG_PIDR1_PART_1, pidr1) << 8);
		u32 variant = FIELD_GET(SMMU_PMCG_PIDR2_REVISION, pidr2);
		u32 revision = FIELD_GET(SMMU_PMCG_PIDR3_REVAND, pidr3);
		u32 implementer =
			FIELD_GET(SMMU_PMCG_PIDR1_DES_0, pidr1) |
			(FIELD_GET(SMMU_PMCG_PIDR2_DES_1, pidr2) << 4) |
			(FIELD_GET(SMMU_PMCG_PIDR4_DES_2, pidr4) << 8);

		iidr = FIELD_PREP(SMMU_PMCG_IIDR_PRODUCTID, productid) |
		       FIELD_PREP(SMMU_PMCG_IIDR_VARIANT, variant) |
		       FIELD_PREP(SMMU_PMCG_IIDR_REVISION, revision) |
		       FIELD_PREP(SMMU_PMCG_IIDR_IMPLEMENTER, implementer);
	}

	smmu_pmu->iidr = iidr;
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = smmu_pmu_enable,
		.pmu_disable = smmu_pmu_disable,
		.event_init = smmu_pmu_event_init,
		.add = smmu_pmu_event_add,
		.del = smmu_pmu_event_del,
		.start = smmu_pmu_event_start,
		.stop = smmu_pmu_event_stop,
		.read = smmu_pmu_event_read,
		.attr_groups = smmu_pmu_attr_grps,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

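	/*
	 * CEID0/1 are two 64-bit registers that together advertise the 128
	 * architected event IDs as a bitmap; the (u32 *) view below hands
	 * them to bitmap_from_arr32() as four 32-bit words.
	 */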
	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

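	/* CFGR.SIZE encodes the counter width minus one, e.g. 0x1f means 32-bit */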
	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

	smmu_pmu_reset(smmu_pmu);

	err = smmu_pmu_setup_irq(smmu_pmu);
	if (err) {
		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
		return err;
	}

	smmu_pmu_get_iidr(smmu_pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
	if (!name) {
		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
		return -EINVAL;
	}

	if (!dev->of_node)
		smmu_pmu_get_acpi_options(smmu_pmu);

	/*
	 * On platforms that suffer this quirk, disabling the PMU sometimes
	 * fails to stop the counters, which leads to inaccurate or erroneous
	 * counts. Forcibly disable the counters with these quirk handlers.
	 */
	if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
		smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
		smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
	}

	/* Pick one CPU to be the preferred one to use */
	smmu_pmu->on_cpu = raw_smp_processor_id();
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));

	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
					       &smmu_pmu->node);
	if (err) {
		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
			err, &res_0->start);
		return err;
	}

	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
	if (err) {
		dev_err(dev, "Error %d registering PMU @%pa\n",
			err, &res_0->start);
		goto out_unregister;
	}

	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
		 &res_0->start, smmu_pmu->num_counters,
		 smmu_pmu->global_filter ? "Global(Counter0)" :
		 "Individual");

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
	return err;
}

static int smmu_pmu_remove(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&smmu_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);

	return 0;
}

static void smmu_pmu_shutdown(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	smmu_pmu_disable(&smmu_pmu->pmu);
}

#ifdef CONFIG_OF
static const struct of_device_id smmu_pmu_of_match[] = {
	{ .compatible = "arm,smmu-v3-pmcg" },
	{}
};
MODULE_DEVICE_TABLE(of, smmu_pmu_of_match);
#endif

static struct platform_driver smmu_pmu_driver = {
	.driver = {
		.name = "arm-smmu-v3-pmcg",
		.of_match_table = of_match_ptr(smmu_pmu_of_match),
		.suppress_bind_attrs = true,
	},
	.probe = smmu_pmu_probe,
	.remove = smmu_pmu_remove,
	.shutdown = smmu_pmu_shutdown,
};

static int __init arm_smmu_pmu_init(void)
{
	int ret;

	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "perf/arm/pmcg:online",
						  NULL,
						  smmu_pmu_offline_cpu);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	ret = platform_driver_register(&smmu_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(cpuhp_state_num);

	return ret;
}
module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
{
	platform_driver_unregister(&smmu_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_exit(arm_smmu_pmu_exit);

MODULE_ALIAS("platform:arm-smmu-v3-pmcg");
MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");