// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Global Performance Reporting
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Xu Yilun <yilun.xu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/perf_event.h>
#include "dfl.h"
#include "dfl-fme.h"

/*
 * Performance Counter Registers for Cache.
 *
 * Cache Events are listed below as CACHE_EVNT_*.
 */
#define CACHE_CTRL			0x8
#define CACHE_RESET_CNTR		BIT_ULL(0)
#define CACHE_FREEZE_CNTR		BIT_ULL(8)
#define CACHE_CTRL_EVNT			GENMASK_ULL(19, 16)
#define CACHE_EVNT_RD_HIT		0x0
#define CACHE_EVNT_WR_HIT		0x1
#define CACHE_EVNT_RD_MISS		0x2
#define CACHE_EVNT_WR_MISS		0x3
#define CACHE_EVNT_RSVD			0x4
#define CACHE_EVNT_HOLD_REQ		0x5
#define CACHE_EVNT_DATA_WR_PORT_CONTEN	0x6
#define CACHE_EVNT_TAG_WR_PORT_CONTEN	0x7
#define CACHE_EVNT_TX_REQ_STALL		0x8
#define CACHE_EVNT_RX_REQ_STALL		0x9
#define CACHE_EVNT_EVICTIONS		0xa
#define CACHE_EVNT_MAX			CACHE_EVNT_EVICTIONS
#define CACHE_CHANNEL_SEL		BIT_ULL(20)
#define CACHE_CHANNEL_RD		0
#define CACHE_CHANNEL_WR		1
#define CACHE_CNTR0			0x10
#define CACHE_CNTR1			0x18
#define CACHE_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
#define CACHE_CNTR_EVNT			GENMASK_ULL(63, 60)

/*
 * Performance Counter Registers for Fabric.
 *
 * Fabric Events are listed below as FAB_EVNT_*.
 */
#define FAB_CTRL			0x20
#define FAB_RESET_CNTR			BIT_ULL(0)
#define FAB_FREEZE_CNTR			BIT_ULL(8)
#define FAB_CTRL_EVNT			GENMASK_ULL(19, 16)
#define FAB_EVNT_PCIE0_RD		0x0
#define FAB_EVNT_PCIE0_WR		0x1
#define FAB_EVNT_PCIE1_RD		0x2
#define FAB_EVNT_PCIE1_WR		0x3
#define FAB_EVNT_UPI_RD			0x4
#define FAB_EVNT_UPI_WR			0x5
#define FAB_EVNT_MMIO_RD		0x6
#define FAB_EVNT_MMIO_WR		0x7
#define FAB_EVNT_MAX			FAB_EVNT_MMIO_WR
#define FAB_PORT_ID			GENMASK_ULL(21, 20)
#define FAB_PORT_FILTER			BIT_ULL(23)
#define FAB_PORT_FILTER_DISABLE		0
#define FAB_PORT_FILTER_ENABLE		1
#define FAB_CNTR			0x28
#define FAB_CNTR_EVNT_CNTR		GENMASK_ULL(59, 0)
#define FAB_CNTR_EVNT			GENMASK_ULL(63, 60)

/*
 * Performance Counter Registers for Clock.
 *
 * The Clock Counter can't be reset or frozen by SW.
 */
#define CLK_CNTR			0x30
#define BASIC_EVNT_CLK			0x0
#define BASIC_EVNT_MAX			BASIC_EVNT_CLK

/*
 * Performance Counter Registers for IOMMU / VT-d.
 *
 * VT-d Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*.
 */
#define VTD_CTRL			0x38
#define VTD_RESET_CNTR			BIT_ULL(0)
#define VTD_FREEZE_CNTR			BIT_ULL(8)
#define VTD_CTRL_EVNT			GENMASK_ULL(19, 16)
#define VTD_EVNT_AFU_MEM_RD_TRANS	0x0
#define VTD_EVNT_AFU_MEM_WR_TRANS	0x1
#define VTD_EVNT_AFU_DEVTLB_RD_HIT	0x2
#define VTD_EVNT_AFU_DEVTLB_WR_HIT	0x3
#define VTD_EVNT_DEVTLB_4K_FILL		0x4
#define VTD_EVNT_DEVTLB_2M_FILL		0x5
#define VTD_EVNT_DEVTLB_1G_FILL		0x6
#define VTD_EVNT_MAX			VTD_EVNT_DEVTLB_1G_FILL
#define VTD_CNTR			0x40
#define VTD_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
#define VTD_CNTR_EVNT			GENMASK_ULL(63, 60)

#define VTD_SIP_CTRL			0x48
#define VTD_SIP_RESET_CNTR		BIT_ULL(0)
#define VTD_SIP_FREEZE_CNTR		BIT_ULL(8)
#define VTD_SIP_CTRL_EVNT		GENMASK_ULL(19, 16)
#define VTD_SIP_EVNT_IOTLB_4K_HIT	0x0
#define VTD_SIP_EVNT_IOTLB_2M_HIT	0x1
#define VTD_SIP_EVNT_IOTLB_1G_HIT	0x2
#define VTD_SIP_EVNT_SLPWC_L3_HIT	0x3
#define VTD_SIP_EVNT_SLPWC_L4_HIT	0x4
#define VTD_SIP_EVNT_RCC_HIT		0x5
#define VTD_SIP_EVNT_IOTLB_4K_MISS	0x6
#define VTD_SIP_EVNT_IOTLB_2M_MISS	0x7
#define VTD_SIP_EVNT_IOTLB_1G_MISS	0x8
#define VTD_SIP_EVNT_SLPWC_L3_MISS	0x9
#define VTD_SIP_EVNT_SLPWC_L4_MISS	0xa
#define VTD_SIP_EVNT_RCC_MISS		0xb
#define VTD_SIP_EVNT_MAX		VTD_SIP_EVNT_SLPWC_L4_MISS
#define VTD_SIP_CNTR			0x50
#define VTD_SIP_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
#define VTD_SIP_CNTR_EVNT		GENMASK_ULL(63, 60)

#define PERF_TIMEOUT			30

#define PERF_MAX_PORT_NUM		1U
/**
 * struct fme_perf_priv - private data structure for the fme perf driver
 *
 * @dev: parent device.
 * @ioaddr: mapped base address of mmio region.
 * @pmu: pmu data structure for fme perf counters.
 * @id: id of this fme performance report private feature.
 * @fab_users: current number of users of the fabric counters.
 * @fab_port_id: indicates the current working mode of the fabric counters.
 * @fab_lock: lock to protect the fabric counters' working mode.
 * @cpu: active CPU to which the PMU is bound for accesses.
 * @node: node for CPU hotplug notifier link.
 * @cpuhp_state: state for CPU hotplug notification.
 */
struct fme_perf_priv {
	struct device *dev;
	void __iomem *ioaddr;
	struct pmu pmu;
	u16 id;

	u32 fab_users;
	u32 fab_port_id;
	spinlock_t fab_lock;

	unsigned int cpu;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
};

/**
 * struct fme_perf_event_ops - callbacks for fme perf events
 *
 * @event_init: callback invoked during event init.
 * @event_destroy: callback invoked during event destroy.
 * @read_counter: callback to read hardware counters.
 */
struct fme_perf_event_ops {
	int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
	void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
			      u32 portid);
	u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
};

#define to_fme_perf_priv(_pmu)	container_of(_pmu, struct fme_perf_priv, pmu)

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct fme_perf_priv *priv;

	priv = to_fme_perf_priv(pmu);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *fme_perf_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group fme_perf_cpumask_group = {
	.attrs = fme_perf_cpumask_attrs,
};

#define FME_EVENT_MASK		GENMASK_ULL(11, 0)
#define FME_EVENT_SHIFT		0
#define FME_EVTYPE_MASK		GENMASK_ULL(15, 12)
#define FME_EVTYPE_SHIFT	12
#define FME_EVTYPE_BASIC	0
#define FME_EVTYPE_CACHE	1
#define FME_EVTYPE_FABRIC	2
#define FME_EVTYPE_VTD		3
#define FME_EVTYPE_VTD_SIP	4
#define FME_EVTYPE_MAX		FME_EVTYPE_VTD_SIP
#define FME_PORTID_MASK		GENMASK_ULL(23, 16)
#define FME_PORTID_SHIFT	16
#define FME_PORTID_ROOT		(0xffU)

#define get_event(_config)	FIELD_GET(FME_EVENT_MASK, _config)
#define get_evtype(_config)	FIELD_GET(FME_EVTYPE_MASK, _config)
#define get_portid(_config)	FIELD_GET(FME_PORTID_MASK, _config)
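
/*
 * Worked example of the config encoding above (hypothetical PMU instance
 * "dfl_fme0"): the device-wide fabric mmio read event is event=0x6
 * (FAB_EVNT_MMIO_RD), evtype=0x2 (FME_EVTYPE_FABRIC) and portid=0xff
 * (FME_PORTID_ROOT), i.e. config = (0xff << 16) | (0x2 << 12) | 0x6 =
 * 0xff2006. So "perf stat -e dfl_fme0/config=0xff2006/" requests the
 * same counter as "perf stat -e dfl_fme0/fab_mmio_read/".
 */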

PMU_FORMAT_ATTR(event, "config:0-11");
PMU_FORMAT_ATTR(evtype, "config:12-15");
PMU_FORMAT_ATTR(portid, "config:16-23");

static struct attribute *fme_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_evtype.attr,
	&format_attr_portid.attr,
	NULL,
};

static const struct attribute_group fme_perf_format_group = {
	.name = "format",
	.attrs = fme_perf_format_attrs,
};

/*
 * There are no default events, but we need to create an
 * "events" group (with empty attrs) before updating
 * it with detected events (using pmu->attr_update).
 */
static struct attribute *fme_perf_events_attrs_empty[] = {
	NULL,
};

static const struct attribute_group fme_perf_events_group = {
	.name = "events",
	.attrs = fme_perf_events_attrs_empty,
};

static const struct attribute_group *fme_perf_groups[] = {
	&fme_perf_format_group,
	&fme_perf_cpumask_group,
	&fme_perf_events_group,
	NULL,
};
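
/*
 * With the attribute groups above, the perf core exposes a registered
 * instance (e.g. a hypothetical "dfl_fme0") under
 * /sys/bus/event_source/devices/dfl_fme0/ with "format", "cpumask" and
 * "events" directories; "events" starts empty and is filled in at
 * registration time through pmu->attr_update (fme_perf_events_groups).
 */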

static bool is_portid_root(u32 portid)
{
	return portid == FME_PORTID_ROOT;
}

static bool is_portid_port(u32 portid)
{
	return portid < PERF_MAX_PORT_NUM;
}

static bool is_portid_root_or_port(u32 portid)
{
	return is_portid_root(portid) || is_portid_port(portid);
}

static u64 fme_read_perf_cntr_reg(void __iomem *addr)
{
	u32 low;
	u64 v;

	/*
	 * For 64-bit counter registers, the counter may increment and carry
	 * out of bit [31] between two 32-bit reads. Add an extra read and
	 * retry to guard against this. The issue only occurs on platforms
	 * which don't support 64-bit reads, where readq is split into two
	 * readl calls.
	 */
	do {
		v = readq(addr);
		low = readl(addr);
	} while (((u32)v) > low);

	return v;
}
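
/*
 * Concrete illustration of the retry loop above: if a split readq captures
 * the low word as 0xffffffff and the counter then carries to 0x1_00000000
 * before the high word is read, v ends up as the torn value 0x1ffffffff.
 * The follow-up readl returns a small low word (e.g. 0x2), so (u32)v > low
 * and the loop re-reads until both halves belong to one consistent value.
 */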

static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
		return 0;

	return -EINVAL;
}

static u64 basic_read_event_counter(struct fme_perf_priv *priv,
				    u32 event, u32 portid)
{
	void __iomem *base = priv->ioaddr;

	return fme_read_perf_cntr_reg(base + CLK_CNTR);
}

static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
	    event <= CACHE_EVNT_MAX && is_portid_root(portid))
		return 0;

	return -EINVAL;
}

static u64 cache_read_event_counter(struct fme_perf_priv *priv,
				    u32 event, u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v, count;
	u8 channel;

	if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
	    event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
	    event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
		channel = CACHE_CHANNEL_WR;
	else
		channel = CACHE_CHANNEL_RD;

	/* set channel access type and cache event code. */
	v = readq(base + CACHE_CTRL);
	v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
	v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
	v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
	writeq(v, base + CACHE_CTRL);

	if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
				      FIELD_GET(CACHE_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
	count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
	v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
	count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);

	return count;
}

static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
				      u32 portid)
{
	if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
		return false;

	if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
	    (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
	     event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
		return false;

	return true;
}

static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	void __iomem *base = priv->ioaddr;
	int ret = 0;
	u64 v;

	if (!is_fabric_event_supported(priv, event, portid))
		return -EINVAL;

	/*
	 * The fabric counter set can only be in one mode at a time: in
	 * overall mode it counts aggregate data for the whole FPGA, and in
	 * port mode it is configured to monitor one individual port.
	 *
	 * So every time a new event is initialized, the driver checks the
	 * current working mode and whether someone else is using this
	 * counter set.
	 */
	spin_lock(&priv->fab_lock);
	if (priv->fab_users && priv->fab_port_id != portid) {
		dev_dbg(priv->dev, "conflicting fabric event monitoring mode.\n");
		ret = -EOPNOTSUPP;
		goto exit;
	}

	priv->fab_users++;

	/*
	 * Skip if the current working mode already matches; otherwise switch
	 * the working mode per the input port_id, to monitor overall data or
	 * another port.
	 */
	if (priv->fab_port_id == portid)
		goto exit;

	priv->fab_port_id = portid;

	v = readq(base + FAB_CTRL);
	v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);

	if (is_portid_root(portid)) {
		v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
	} else {
		v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
		v |= FIELD_PREP(FAB_PORT_ID, portid);
	}
	writeq(v, base + FAB_CTRL);

exit:
	spin_unlock(&priv->fab_lock);
	return ret;
}
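
/*
 * Usage note for the mode handling above (hypothetical sessions): while one
 * user counts overall traffic, e.g. "perf stat -e dfl_fme0/fab_mmio_read/",
 * initializing a port-filtered fabric event such as
 * "dfl_fme0/fab_port_mmio_read,portid=0/" fails with -EOPNOTSUPP until the
 * first user's events are destroyed and fab_users drops back to zero.
 */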

static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
				 u32 portid)
{
	spin_lock(&priv->fab_lock);
	priv->fab_users--;
	spin_unlock(&priv->fab_lock);
}

static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
				     u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v;

	v = readq(base + FAB_CTRL);
	v &= ~FAB_CTRL_EVNT;
	v |= FIELD_PREP(FAB_CTRL_EVNT, event);
	writeq(v, base + FAB_CTRL);

	if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
				      FIELD_GET(FAB_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + FAB_CNTR);
	return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
}

static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
	    event <= VTD_EVNT_MAX && is_portid_port(portid))
		return 0;

	return -EINVAL;
}

static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
				  u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v;

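	/*
	 * VT-d event codes are banked per port: port N's events start at
	 * N * (VTD_EVNT_MAX + 1), so the port id selects the bank below.
	 */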
	event += (portid * (VTD_EVNT_MAX + 1));

	v = readq(base + VTD_CTRL);
	v &= ~VTD_CTRL_EVNT;
	v |= FIELD_PREP(VTD_CTRL_EVNT, event);
	writeq(v, base + VTD_CTRL);

	if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
				      FIELD_GET(VTD_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + VTD_CNTR);
	return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
}

static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
	    event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
		return 0;

	return -EINVAL;
}

static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
				      u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v;

	v = readq(base + VTD_SIP_CTRL);
	v &= ~VTD_SIP_CTRL_EVNT;
	v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
	writeq(v, base + VTD_SIP_CTRL);

	if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
				      FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
	return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
}

static struct fme_perf_event_ops fme_perf_event_ops[] = {
	[FME_EVTYPE_BASIC]	= {.event_init = basic_event_init,
				   .read_counter = basic_read_event_counter,},
	[FME_EVTYPE_CACHE]	= {.event_init = cache_event_init,
				   .read_counter = cache_read_event_counter,},
	[FME_EVTYPE_FABRIC]	= {.event_init = fabric_event_init,
				   .event_destroy = fabric_event_destroy,
				   .read_counter = fabric_read_event_counter,},
	[FME_EVTYPE_VTD]	= {.event_init = vtd_event_init,
				   .read_counter = vtd_read_event_counter,},
	[FME_EVTYPE_VTD_SIP]	= {.event_init = vtd_sip_event_init,
				   .read_counter = vtd_sip_read_event_counter,},
};

static ssize_t fme_perf_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;
	unsigned long config;
	char *ptr = buf;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	config = (unsigned long)eattr->var;

	ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
	ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));

	if (is_portid_root(get_portid(config)))
		ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
	else
		ptr += sprintf(ptr, ",portid=?\n");

	return (ssize_t)(ptr - buf);
}
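
/*
 * Example output from the show routine above: reading events/fab_mmio_read
 * returns "event=0x06,evtype=0x02,portid=0xff", while the port-filtered
 * event entries print "portid=?" to signal that the user must supply a
 * port id explicitly.
 */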

#define FME_EVENT_ATTR(_name) \
	__ATTR(_name, 0444, fme_perf_event_show, NULL)

#define FME_PORT_EVENT_CONFIG(_event, _type)				\
	(void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) |	\
		(((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))

#define FME_EVENT_CONFIG(_event, _type)					\
	(void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) |	\
		(((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) |	\
		(FME_PORTID_ROOT << FME_PORTID_SHIFT))

/* FME Perf Basic Events */
#define FME_EVENT_BASIC(_name, _event)					\
static struct dev_ext_attribute fme_perf_event_##_name = {		\
	.attr = FME_EVENT_ATTR(_name),					\
	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC),		\
}

FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);

static struct attribute *fme_perf_basic_events_attrs[] = {
	&fme_perf_event_clock.attr.attr,
	NULL,
};

static const struct attribute_group fme_perf_basic_events_group = {
	.name = "events",
	.attrs = fme_perf_basic_events_attrs,
};

/* FME Perf Cache Events */
#define FME_EVENT_CACHE(_name, _event)					\
static struct dev_ext_attribute fme_perf_event_cache_##_name = {	\
	.attr = FME_EVENT_ATTR(cache_##_name),				\
	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE),		\
}

FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT);
FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS);
FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT);
FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS);
FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS);
FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN);

static struct attribute *fme_perf_cache_events_attrs[] = {
	&fme_perf_event_cache_read_hit.attr.attr,
	&fme_perf_event_cache_read_miss.attr.attr,
	&fme_perf_event_cache_write_hit.attr.attr,
	&fme_perf_event_cache_write_miss.attr.attr,
	&fme_perf_event_cache_hold_request.attr.attr,
	&fme_perf_event_cache_tx_req_stall.attr.attr,
	&fme_perf_event_cache_rx_req_stall.attr.attr,
	&fme_perf_event_cache_eviction.attr.attr,
	&fme_perf_event_cache_data_write_port_contention.attr.attr,
	&fme_perf_event_cache_tag_write_port_contention.attr.attr,
	NULL,
};

static umode_t fme_perf_events_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
	struct fme_perf_priv *priv = to_fme_perf_priv(pmu);

	return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
}

static const struct attribute_group fme_perf_cache_events_group = {
	.name = "events",
	.attrs = fme_perf_cache_events_attrs,
	.is_visible = fme_perf_events_visible,
};

/* FME Perf Fabric Events */
#define FME_EVENT_FABRIC(_name, _event)					\
static struct dev_ext_attribute fme_perf_event_fab_##_name = {		\
	.attr = FME_EVENT_ATTR(fab_##_name),				\
	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC),		\
}

#define FME_EVENT_FABRIC_PORT(_name, _event)				\
static struct dev_ext_attribute fme_perf_event_fab_port_##_name = {	\
	.attr = FME_EVENT_ATTR(fab_port_##_name),			\
	.var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC),	\
}

FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD);
FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD);
FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD);
FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR);
FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR);

FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD);
FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD);
FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD);
FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR);
FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR);

static struct attribute *fme_perf_fabric_events_attrs[] = {
	&fme_perf_event_fab_pcie0_read.attr.attr,
	&fme_perf_event_fab_pcie0_write.attr.attr,
	&fme_perf_event_fab_pcie1_read.attr.attr,
	&fme_perf_event_fab_pcie1_write.attr.attr,
	&fme_perf_event_fab_upi_read.attr.attr,
	&fme_perf_event_fab_upi_write.attr.attr,
	&fme_perf_event_fab_mmio_read.attr.attr,
	&fme_perf_event_fab_mmio_write.attr.attr,
	&fme_perf_event_fab_port_pcie0_read.attr.attr,
	&fme_perf_event_fab_port_pcie0_write.attr.attr,
	&fme_perf_event_fab_port_pcie1_read.attr.attr,
	&fme_perf_event_fab_port_pcie1_write.attr.attr,
	&fme_perf_event_fab_port_upi_read.attr.attr,
	&fme_perf_event_fab_port_upi_write.attr.attr,
	&fme_perf_event_fab_port_mmio_read.attr.attr,
	&fme_perf_event_fab_port_mmio_write.attr.attr,
	NULL,
};

static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
					      struct attribute *attr, int n)
{
	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
	struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
	struct dev_ext_attribute *eattr;
	unsigned long var;

	eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
	var = (unsigned long)eattr->var;

	if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
		return attr->mode;

	return 0;
}

static const struct attribute_group fme_perf_fabric_events_group = {
	.name = "events",
	.attrs = fme_perf_fabric_events_attrs,
	.is_visible = fme_perf_fabric_events_visible,
};

/* FME Perf VTD Events */
#define FME_EVENT_VTD_PORT(_name, _event)				\
static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = {	\
	.attr = FME_EVENT_ATTR(vtd_port_##_name),			\
	.var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD),		\
}

FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS);
FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT);
FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT);
FME_EVENT_VTD_PORT(devtlb_4k_fill, VTD_EVNT_DEVTLB_4K_FILL);
FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL);
FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL);

static struct attribute *fme_perf_vtd_events_attrs[] = {
	&fme_perf_event_vtd_port_read_transaction.attr.attr,
	&fme_perf_event_vtd_port_write_transaction.attr.attr,
	&fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
	&fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
	&fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
	&fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
	&fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
	NULL,
};

static const struct attribute_group fme_perf_vtd_events_group = {
	.name = "events",
	.attrs = fme_perf_vtd_events_attrs,
	.is_visible = fme_perf_events_visible,
};

/* FME Perf VTD SIP Events */
#define FME_EVENT_VTD_SIP(_name, _event)				\
static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = {	\
	.attr = FME_EVENT_ATTR(vtd_sip_##_name),			\
	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP),		\
}

FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT);
FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT);
FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT);
FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT);
FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT);
FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT);
FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS);

static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
	&fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
	&fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
	&fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
	&fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
	&fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
	&fme_perf_event_vtd_sip_rcc_hit.attr.attr,
	&fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
	&fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
	&fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
	&fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
	&fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
	&fme_perf_event_vtd_sip_rcc_miss.attr.attr,
	NULL,
};

static const struct attribute_group fme_perf_vtd_sip_events_group = {
	.name = "events",
	.attrs = fme_perf_vtd_sip_events_attrs,
	.is_visible = fme_perf_events_visible,
};

static const struct attribute_group *fme_perf_events_groups[] = {
	&fme_perf_basic_events_group,
	&fme_perf_cache_events_group,
	&fme_perf_fabric_events_group,
	&fme_perf_vtd_events_group,
	&fme_perf_vtd_sip_events_group,
	NULL,
};

static struct fme_perf_event_ops *get_event_ops(u32 evtype)
{
	if (evtype > FME_EVTYPE_MAX)
		return NULL;

	return &fme_perf_event_ops[evtype];
}

static void fme_perf_event_destroy(struct perf_event *event)
{
	struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);

	if (ops->event_destroy)
		ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
}

static int fme_perf_event_init(struct perf_event *event)
{
	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct fme_perf_event_ops *ops;
	u32 eventid, evtype, portid;

	/* check the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * fme counters are shared across all cores, so per-process
	 * mode and event sampling mode are not supported.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->cpu != priv->cpu)
		return -EINVAL;

	eventid = get_event(event->attr.config);
	portid = get_portid(event->attr.config);
	evtype = get_evtype(event->attr.config);
	if (evtype > FME_EVTYPE_MAX)
		return -EINVAL;

	hwc->event_base = evtype;
	hwc->idx = (int)eventid;
	hwc->config_base = portid;

	event->destroy = fme_perf_event_destroy;

	dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x\n",
		__func__, eventid, evtype, portid);

	ops = get_event_ops(evtype);
	if (ops->event_init)
		return ops->event_init(priv, eventid, portid);

	return 0;
}
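
/*
 * Note on the hw_perf_event fields repurposed in the init above:
 * event_base carries the evtype used to look up fme_perf_event_ops, idx
 * holds the raw event code, and config_base holds the port id, so the
 * ops callbacks below can be driven entirely from event->hw.
 */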

static void fme_perf_event_update(struct perf_event *event)
{
	struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 now, prev, delta;

	now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
	prev = local64_read(&hwc->prev_count);
	delta = now - prev;

	/* advance prev_count so repeated updates don't double-count */
	local64_set(&hwc->prev_count, now);
	local64_add(delta, &event->count);
}

static void fme_perf_event_start(struct perf_event *event, int flags)
{
	struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 count;

	count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
	local64_set(&hwc->prev_count, count);
}

static void fme_perf_event_stop(struct perf_event *event, int flags)
{
	fme_perf_event_update(event);
}

static int fme_perf_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		fme_perf_event_start(event, flags);

	return 0;
}

static void fme_perf_event_del(struct perf_event *event, int flags)
{
	fme_perf_event_stop(event, PERF_EF_UPDATE);
}

static void fme_perf_event_read(struct perf_event *event)
{
	fme_perf_event_update(event);
}

static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
{
	void __iomem *base = priv->ioaddr;
	u64 v;

	/* read and save the current working mode of the fabric counters */
	v = readq(base + FAB_CTRL);

	if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
		priv->fab_port_id = FME_PORTID_ROOT;
	else
		priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
}

static int fme_perf_pmu_register(struct platform_device *pdev,
				 struct fme_perf_priv *priv)
{
	struct pmu *pmu = &priv->pmu;
	char *name;
	int ret;

	spin_lock_init(&priv->fab_lock);

	fme_perf_setup_hardware(priv);

	pmu->task_ctx_nr = perf_invalid_context;
	pmu->attr_groups = fme_perf_groups;
	pmu->attr_update = fme_perf_events_groups;
	pmu->event_init = fme_perf_event_init;
	pmu->add = fme_perf_event_add;
	pmu->del = fme_perf_event_del;
	pmu->start = fme_perf_event_start;
	pmu->stop = fme_perf_event_stop;
	pmu->read = fme_perf_event_read;
	pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT |
			    PERF_PMU_CAP_NO_EXCLUDE;

	name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
	if (!name)
		return -ENOMEM;

	ret = perf_pmu_register(pmu, name, -1);
	if (ret)
		return ret;

	return 0;
}

static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
{
	perf_pmu_unregister(&priv->pmu);
}

static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct fme_perf_priv *priv;
	int target;

	priv = hlist_entry_safe(node, struct fme_perf_priv, node);

	if (cpu != priv->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	priv->cpu = target;
	perf_pmu_migrate_context(&priv->pmu, cpu, target);

	return 0;
}
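
/*
 * Note on the hotplug callback above: when the CPU this PMU is bound to
 * (the one advertised through the cpumask attribute) goes offline, the
 * perf context is migrated to any other online CPU, so counting continues
 * and priv->cpu stays valid.
 */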

static int fme_perf_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct fme_perf_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = &pdev->dev;
	priv->ioaddr = feature->ioaddr;
	priv->id = feature->id;
	priv->cpu = raw_smp_processor_id();

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/fpga/dfl_fme:online",
				      NULL, fme_perf_offline_cpu);
	if (ret < 0)
		return ret;

	priv->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
	if (ret)
		goto cpuhp_instance_err;

	ret = fme_perf_pmu_register(pdev, priv);
	if (ret)
		goto pmu_register_err;

	feature->priv = priv;
	return 0;

pmu_register_err:
	cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(priv->cpuhp_state);
	return ret;
}

static void fme_perf_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	struct fme_perf_priv *priv = feature->priv;

	fme_perf_pmu_unregister(priv);
	cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
	cpuhp_remove_multi_state(priv->cpuhp_state);
}

const struct dfl_feature_id fme_perf_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_IPERF,},
	{.id = FME_FEATURE_ID_GLOBAL_DPERF,},
	{0,}
};

const struct dfl_feature_ops fme_perf_ops = {
	.init = fme_perf_init,
	.uinit = fme_perf_uinit,
};