1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
 * This driver adds support for the HNS3 PMU iEP device. Related perf events
 * are bandwidth, latency, packet rate, interrupt rate, etc.
5 | * |
6 | * Copyright (C) 2022 HiSilicon Limited |
7 | */ |
8 | #include <linux/bitfield.h> |
9 | #include <linux/bitmap.h> |
10 | #include <linux/bug.h> |
11 | #include <linux/cpuhotplug.h> |
12 | #include <linux/cpumask.h> |
13 | #include <linux/delay.h> |
14 | #include <linux/device.h> |
15 | #include <linux/err.h> |
16 | #include <linux/interrupt.h> |
17 | #include <linux/iopoll.h> |
18 | #include <linux/io-64-nonatomic-hi-lo.h> |
19 | #include <linux/irq.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/list.h> |
22 | #include <linux/module.h> |
23 | #include <linux/pci.h> |
24 | #include <linux/pci-epf.h> |
25 | #include <linux/perf_event.h> |
26 | #include <linux/smp.h> |
27 | |
28 | /* registers offset address */ |
29 | #define HNS3_PMU_REG_GLOBAL_CTRL 0x0000 |
30 | #define HNS3_PMU_REG_CLOCK_FREQ 0x0020 |
31 | #define HNS3_PMU_REG_BDF 0x0fe0 |
32 | #define HNS3_PMU_REG_VERSION 0x0fe4 |
33 | #define HNS3_PMU_REG_DEVICE_ID 0x0fe8 |
34 | |
35 | #define HNS3_PMU_REG_EVENT_OFFSET 0x1000 |
36 | #define HNS3_PMU_REG_EVENT_SIZE 0x1000 |
37 | #define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00 |
38 | #define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04 |
39 | #define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08 |
40 | #define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c |
41 | #define HNS3_PMU_REG_EVENT_COUNTER 0x10 |
42 | #define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18 |
43 | #define HNS3_PMU_REG_EVENT_QID_CTRL 0x28 |
44 | #define HNS3_PMU_REG_EVENT_QID_PARA 0x2c |
45 | |
46 | #define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0) |
47 | #define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1) |
48 | #define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2) |
49 | #define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3) |
50 | #define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4) |
51 | #define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5) |
52 | |
53 | #define HNS3_PMU_FILTER_ALL_TC 0xf |
54 | #define HNS3_PMU_FILTER_ALL_QUEUE 0xffff |
55 | |
56 | #define HNS3_PMU_CTRL_SUBEVENT_S 4 |
57 | #define HNS3_PMU_CTRL_FILTER_MODE_S 24 |
58 | |
59 | #define HNS3_PMU_GLOBAL_START BIT(0) |
60 | |
61 | #define HNS3_PMU_EVENT_STATUS_RESET BIT(11) |
62 | #define HNS3_PMU_EVENT_EN BIT(12) |
63 | #define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15) |
64 | |
65 | #define HNS3_PMU_QID_PARA_FUNC_S 0 |
66 | #define HNS3_PMU_QID_PARA_QUEUE_S 16 |
67 | |
68 | #define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0) |
69 | #define HNS3_PMU_QID_CTRL_DONE BIT(1) |
70 | #define HNS3_PMU_QID_CTRL_MISS BIT(2) |
71 | |
72 | #define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1) |
73 | |
74 | #define HNS3_PMU_MAX_HW_EVENTS 8 |
75 | |
76 | /* |
77 | * Each hardware event contains two registers (counter and ext_counter) for |
 * bandwidth, packet rate, latency and interrupt rate. The two registers are
 * started together when a hardware event is enabled. The meaning of counter
 * and ext_counter differs between event types, as shown below:
82 | * |
83 | * +----------------+------------------+---------------+ |
84 | * | event type | counter | ext_counter | |
85 | * +----------------+------------------+---------------+ |
86 | * | bandwidth | byte number | cycle number | |
87 | * +----------------+------------------+---------------+ |
88 | * | packet rate | packet number | cycle number | |
89 | * +----------------+------------------+---------------+ |
90 | * | latency | cycle number | packet number | |
91 | * +----------------+------------------+---------------+ |
92 | * | interrupt rate | interrupt number | cycle number | |
93 | * +----------------+------------------+---------------+ |
94 | * |
 * The cycle number is the increment of the hardware timer's counter; the
 * frequency of the hardware timer can be read from the hw_clk_freq file.
97 | * |
98 | * Performance of each hardware event is calculated by: counter / ext_counter. |
99 | * |
100 | * Since processing of data is preferred to be done in userspace, we expose |
101 | * ext_counter as a separate event for userspace and use bit 16 to indicate it. |
 * For example, events 0x00001 and 0x10001 are actually one hardware event
 * because bits 0-15 are the same. If bit 16 of an event is 0, the counter
 * register is read; otherwise, the ext_counter register is read.
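 *
 * Worked example (hypothetical numbers, for illustration only): for a
 * bandwidth event with hw_clk_freq = 250000000 (250 MHz), counter = 1000000
 * (bytes) and ext_counter = 500000 (cycles), userspace computes:
 *
 *   bandwidth = counter / ext_counter * hw_clk_freq
 *             = 1000000 / 500000 * 250000000 = 500 MB/s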
105 | */ |
106 | /* bandwidth events */ |
107 | #define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001 |
108 | #define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001 |
109 | #define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002 |
110 | #define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002 |
111 | #define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003 |
112 | #define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003 |
113 | #define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004 |
114 | #define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004 |
115 | #define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005 |
116 | #define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005 |
117 | #define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006 |
118 | #define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006 |
119 | #define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008 |
120 | #define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008 |
121 | #define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009 |
122 | #define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009 |
123 | #define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a |
124 | #define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a |
125 | #define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b |
126 | #define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b |
127 | #define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c |
128 | #define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c |
129 | #define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d |
130 | #define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d |
131 | #define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e |
132 | #define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e |
133 | #define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f |
134 | #define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f |
135 | #define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010 |
136 | #define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010 |
137 | |
138 | /* packet rate events */ |
139 | #define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100 |
140 | #define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100 |
141 | #define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101 |
142 | #define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101 |
143 | #define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102 |
144 | #define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102 |
145 | #define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103 |
146 | #define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103 |
147 | #define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104 |
148 | #define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104 |
149 | #define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105 |
150 | #define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105 |
151 | #define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106 |
152 | #define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 0x10106 |
153 | #define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107 |
154 | #define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107 |
155 | #define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108 |
156 | #define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108 |
157 | #define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109 |
158 | #define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109 |
159 | #define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a |
160 | #define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a |
161 | #define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b |
162 | #define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b |
163 | #define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c |
164 | #define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c |
165 | #define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d |
166 | #define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d |
167 | #define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e |
168 | #define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e |
169 | #define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f |
170 | #define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f |
171 | #define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110 |
172 | #define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110 |
173 | #define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111 |
174 | #define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111 |
175 | #define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112 |
176 | #define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112 |
177 | |
178 | /* latency events */ |
179 | #define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202 |
180 | #define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202 |
181 | #define HNS3_PMU_EVT_DLY_TX_TIME 0x00204 |
182 | #define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204 |
183 | #define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206 |
184 | #define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206 |
185 | #define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207 |
186 | #define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207 |
187 | #define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208 |
188 | #define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208 |
189 | #define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209 |
190 | #define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209 |
191 | #define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e |
192 | #define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e |
193 | #define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f |
194 | #define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f |
195 | #define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210 |
196 | #define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210 |
197 | #define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211 |
198 | #define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211 |
199 | #define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212 |
200 | #define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212 |
201 | #define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213 |
202 | #define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213 |
203 | #define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214 |
204 | #define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214 |
205 | #define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215 |
206 | #define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215 |
207 | #define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216 |
208 | #define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216 |
209 | #define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217 |
210 | #define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217 |
211 | #define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218 |
212 | #define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218 |
213 | #define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219 |
214 | #define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219 |
215 | #define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a |
216 | #define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a |
217 | #define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c |
218 | #define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c |
219 | |
220 | /* interrupt rate events */ |
221 | #define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300 |
222 | #define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300 |
223 | |
224 | /* filter mode supported by each bandwidth event */ |
225 | #define HNS3_PMU_FILTER_BW_SSU_EGU 0x07 |
226 | #define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f |
227 | #define HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f |
228 | #define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f |
229 | #define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f |
230 | #define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11 |
231 | #define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11 |
232 | #define HNS3_PMU_FILTER_BW_WR_FBD 0x1b |
233 | #define HNS3_PMU_FILTER_BW_WR_EBD 0x11 |
234 | #define HNS3_PMU_FILTER_BW_RD_FBD 0x01 |
235 | #define HNS3_PMU_FILTER_BW_RD_EBD 0x1b |
236 | #define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01 |
237 | #define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01 |
238 | #define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01 |
239 | #define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01 |
240 | |
241 | /* filter mode supported by each packet rate event */ |
242 | #define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07 |
243 | #define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07 |
244 | #define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f |
245 | #define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f |
246 | #define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f |
247 | #define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f |
248 | #define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11 |
249 | #define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f |
250 | #define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11 |
251 | #define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b |
252 | #define HNS3_PMU_FILTER_PPS_WR_EBD 0x11 |
253 | #define HNS3_PMU_FILTER_PPS_RD_FBD 0x01 |
254 | #define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b |
255 | #define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01 |
256 | #define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01 |
257 | #define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01 |
258 | #define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01 |
259 | #define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01 |
260 | #define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01 |
261 | |
262 | /* filter mode supported by each latency event */ |
263 | #define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01 |
264 | #define HNS3_PMU_FILTER_DLY_TX 0x01 |
265 | #define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07 |
266 | #define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07 |
267 | #define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07 |
268 | #define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07 |
269 | #define HNS3_PMU_FILTER_DLY_RPU 0x11 |
270 | #define HNS3_PMU_FILTER_DLY_TPU 0x1f |
271 | #define HNS3_PMU_FILTER_DLY_RPE 0x01 |
272 | #define HNS3_PMU_FILTER_DLY_TPE 0x0b |
273 | #define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b |
274 | #define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b |
275 | #define HNS3_PMU_FILTER_DLY_WR_EBD 0x11 |
276 | #define HNS3_PMU_FILTER_DLY_RD_FBD 0x01 |
277 | #define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b |
278 | #define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01 |
279 | #define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01 |
280 | #define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01 |
281 | #define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01 |
282 | #define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01 |
283 | |
284 | /* filter mode supported by each interrupt rate event */ |
285 | #define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01 |
286 | |
287 | enum hns3_pmu_hw_filter_mode { |
288 | HNS3_PMU_HW_FILTER_GLOBAL, |
289 | HNS3_PMU_HW_FILTER_PORT, |
290 | HNS3_PMU_HW_FILTER_PORT_TC, |
291 | HNS3_PMU_HW_FILTER_FUNC, |
292 | HNS3_PMU_HW_FILTER_FUNC_QUEUE, |
293 | HNS3_PMU_HW_FILTER_FUNC_INTR, |
294 | }; |
295 | |
296 | struct hns3_pmu_event_attr { |
297 | u32 event; |
298 | u16 filter_support; |
299 | }; |
300 | |
301 | struct hns3_pmu { |
302 | struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS]; |
303 | struct hlist_node node; |
304 | struct pci_dev *pdev; |
305 | struct pmu pmu; |
306 | void __iomem *base; |
307 | int irq; |
308 | int on_cpu; |
309 | u32 identifier; |
310 | u32 hw_clk_freq; /* hardware clock frequency of PMU */ |
311 | /* maximum and minimum bdf allowed by PMU */ |
312 | u16 bdf_min; |
313 | u16 bdf_max; |
314 | }; |
315 | |
316 | #define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu)) |
317 | |
318 | #define GET_PCI_DEVFN(bdf) ((bdf) & 0xff) |
319 | |
320 | #define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff) |
321 | #define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07)) |
322 | #define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func)) |
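
/*
 * For example (illustrative values): FILTER_CONDITION_PORT(2) evaluates to
 * (1 << 2) & 0xff = 0x4, a one-hot port mask, and
 * FILTER_CONDITION_PORT_TC(2, 3) evaluates to (2 << 3) | (3 & 0x07) = 0x13.
 */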
323 | |
324 | #define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \ |
325 | static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \ |
326 | { \ |
327 | return FIELD_GET(GENMASK_ULL(_end, _start), \ |
328 | event->attr._config); \ |
329 | } |
330 | |
331 | HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7); |
332 | HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15); |
333 | HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16); |
334 | HNS3_PMU_FILTER_ATTR(port, config1, 0, 3); |
335 | HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7); |
336 | HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23); |
337 | HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39); |
338 | HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51); |
339 | HNS3_PMU_FILTER_ATTR(global, config1, 52, 52); |
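
/*
 * For example, hns3_pmu_get_bdf(event) above expands to
 * FIELD_GET(GENMASK_ULL(23, 8), event->attr.config1), i.e. it extracts
 * bits 8-23 of config1 as the target bus/device/function number.
 */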
340 | |
341 | #define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\ |
342 | HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \ |
343 | HNS3_PMU_FILTER_BW_##_name}) |
344 | #define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ |
345 | HNS3_PMU_EVT_BW_##_name##_TIME, \ |
346 | HNS3_PMU_FILTER_BW_##_name}) |
347 | #define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ |
348 | HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \ |
349 | HNS3_PMU_FILTER_PPS_##_name}) |
350 | #define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ |
351 | HNS3_PMU_EVT_PPS_##_name##_TIME, \ |
352 | HNS3_PMU_FILTER_PPS_##_name}) |
353 | #define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ |
354 | HNS3_PMU_EVT_DLY_##_name##_TIME, \ |
355 | HNS3_PMU_FILTER_DLY_##_name}) |
356 | #define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ |
357 | HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \ |
358 | HNS3_PMU_FILTER_DLY_##_name}) |
359 | #define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\ |
360 | HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \ |
361 | HNS3_PMU_FILTER_INTR_##_name}) |
362 | #define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ |
363 | HNS3_PMU_EVT_PPS_##_name##_TIME, \ |
364 | HNS3_PMU_FILTER_INTR_##_name}) |
365 | |
366 | static ssize_t hns3_pmu_format_show(struct device *dev, |
367 | struct device_attribute *attr, char *buf) |
368 | { |
369 | struct dev_ext_attribute *eattr; |
370 | |
371 | eattr = container_of(attr, struct dev_ext_attribute, attr); |
372 | |
	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
374 | } |
375 | |
376 | static ssize_t hns3_pmu_event_show(struct device *dev, |
377 | struct device_attribute *attr, char *buf) |
378 | { |
379 | struct hns3_pmu_event_attr *event; |
380 | struct dev_ext_attribute *eattr; |
381 | |
382 | eattr = container_of(attr, struct dev_ext_attribute, attr); |
383 | event = eattr->var; |
384 | |
	return sysfs_emit(buf, "config=0x%x\n", event->event);
386 | } |
387 | |
388 | static ssize_t hns3_pmu_filter_mode_show(struct device *dev, |
389 | struct device_attribute *attr, |
390 | char *buf) |
391 | { |
392 | struct hns3_pmu_event_attr *event; |
393 | struct dev_ext_attribute *eattr; |
394 | int len; |
395 | |
396 | eattr = container_of(attr, struct dev_ext_attribute, attr); |
397 | event = eattr->var; |
398 | |
	len = sysfs_emit_at(buf, 0, "filter mode supported: ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
		len += sysfs_emit_at(buf, len, "global ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
		len += sysfs_emit_at(buf, len, "port ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
		len += sysfs_emit_at(buf, len, "port-tc ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
		len += sysfs_emit_at(buf, len, "func ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
		len += sysfs_emit_at(buf, len, "func-queue ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
		len += sysfs_emit_at(buf, len, "func-intr ");

	len += sysfs_emit_at(buf, len, "\n");
414 | |
415 | return len; |
416 | } |
417 | |
418 | #define HNS3_PMU_ATTR(_name, _func, _config) \ |
419 | (&((struct dev_ext_attribute[]) { \ |
420 | { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ |
421 | })[0].attr.attr) |
422 | |
423 | #define HNS3_PMU_FORMAT_ATTR(_name, _format) \ |
424 | HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format) |
425 | #define HNS3_PMU_EVENT_ATTR(_name, _event) \ |
426 | HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event) |
427 | #define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \ |
428 | HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event) |
429 | |
430 | #define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \ |
431 | HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ |
432 | HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) |
433 | #define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \ |
434 | HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ |
435 | HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) |
436 | #define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \ |
437 | HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ |
438 | HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) |
439 | #define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \ |
440 | HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ |
441 | HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) |
442 | |
443 | #define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \ |
444 | HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ |
445 | HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) |
446 | #define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \ |
447 | HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ |
448 | HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) |
449 | #define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \ |
450 | HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ |
451 | HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) |
452 | #define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \ |
453 | HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ |
454 | HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) |
455 | |
456 | static u8 hns3_pmu_hw_filter_modes[] = { |
457 | HNS3_PMU_HW_FILTER_GLOBAL, |
458 | HNS3_PMU_HW_FILTER_PORT, |
459 | HNS3_PMU_HW_FILTER_PORT_TC, |
460 | HNS3_PMU_HW_FILTER_FUNC, |
461 | HNS3_PMU_HW_FILTER_FUNC_QUEUE, |
462 | HNS3_PMU_HW_FILTER_FUNC_INTR, |
463 | }; |
464 | |
465 | #define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \ |
466 | ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)]) |
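
/*
 * For example, HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT) points
 * hwc->addr_filters at the HNS3_PMU_HW_FILTER_PORT slot of the array above;
 * the selected mode is later recovered via *(u8 *)hwc->addr_filters in
 * hns3_pmu_get_filter_condition() and hns3_pmu_config_filter().
 */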
467 | |
468 | static ssize_t identifier_show(struct device *dev, |
469 | struct device_attribute *attr, char *buf) |
470 | { |
471 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); |
472 | |
	return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
474 | } |
475 | static DEVICE_ATTR_RO(identifier); |
476 | |
477 | static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, |
478 | char *buf) |
479 | { |
480 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); |
481 | |
	return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
483 | } |
484 | static DEVICE_ATTR_RO(cpumask); |
485 | |
486 | static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, |
487 | char *buf) |
488 | { |
489 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); |
490 | u16 bdf = hns3_pmu->bdf_min; |
491 | |
	return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
493 | PCI_SLOT(bdf), PCI_FUNC(bdf)); |
494 | } |
495 | static DEVICE_ATTR_RO(bdf_min); |
496 | |
497 | static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, |
498 | char *buf) |
499 | { |
500 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); |
501 | u16 bdf = hns3_pmu->bdf_max; |
502 | |
	return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
504 | PCI_SLOT(bdf), PCI_FUNC(bdf)); |
505 | } |
506 | static DEVICE_ATTR_RO(bdf_max); |
507 | |
508 | static ssize_t hw_clk_freq_show(struct device *dev, |
509 | struct device_attribute *attr, char *buf) |
510 | { |
511 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); |
512 | |
	return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq);
514 | } |
515 | static DEVICE_ATTR_RO(hw_clk_freq); |
516 | |
517 | static struct attribute *hns3_pmu_events_attr[] = { |
518 | /* bandwidth events */ |
519 | HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU), |
520 | HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU), |
521 | HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE), |
522 | HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU), |
523 | HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU), |
524 | HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX), |
525 | HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), |
526 | HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD), |
527 | HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD), |
528 | HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD), |
529 | HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD), |
530 | HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0), |
531 | HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1), |
532 | HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0), |
533 | HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1), |
534 | |
535 | /* packet rate events */ |
536 | HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU), |
537 | HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU), |
538 | HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU), |
539 | HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE), |
540 | HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU), |
541 | HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU), |
542 | HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX), |
543 | HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU), |
544 | HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), |
545 | HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD), |
546 | HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD), |
547 | HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD), |
548 | HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD), |
549 | HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0), |
550 | HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1), |
551 | HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0), |
552 | HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1), |
553 | HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), |
554 | HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), |
555 | |
556 | /* latency events */ |
557 | HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH), |
558 | HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX), |
559 | HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), |
560 | HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), |
561 | HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), |
562 | HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), |
563 | HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU), |
564 | HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU), |
565 | HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE), |
566 | HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE), |
567 | HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH), |
568 | HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD), |
569 | HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD), |
570 | HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD), |
571 | HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD), |
572 | HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0), |
573 | HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1), |
574 | HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0), |
575 | HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1), |
576 | HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE), |
577 | |
578 | /* interrupt rate events */ |
579 | HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC), |
580 | |
581 | NULL |
582 | }; |
583 | |
584 | static struct attribute *hns3_pmu_filter_mode_attr[] = { |
585 | /* bandwidth events */ |
586 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU), |
587 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU), |
588 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE), |
589 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU), |
590 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU), |
591 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX), |
592 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), |
593 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD), |
594 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD), |
595 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD), |
596 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD), |
597 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0), |
598 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1), |
599 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0), |
600 | HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1), |
601 | |
602 | /* packet rate events */ |
603 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU), |
604 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU), |
605 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU), |
606 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE), |
607 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU), |
608 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU), |
609 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX), |
610 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU), |
611 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), |
612 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD), |
613 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD), |
614 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD), |
615 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD), |
616 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0), |
617 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1), |
618 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0), |
619 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1), |
620 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), |
621 | HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), |
622 | |
623 | /* latency events */ |
624 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH), |
625 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX), |
626 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), |
627 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), |
628 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), |
629 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), |
630 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU), |
631 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU), |
632 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE), |
633 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE), |
634 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH), |
635 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD), |
636 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD), |
637 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD), |
638 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD), |
639 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0), |
640 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1), |
641 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0), |
642 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1), |
643 | HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE), |
644 | |
645 | /* interrupt rate events */ |
646 | HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC), |
647 | |
648 | NULL |
649 | }; |
650 | |
651 | static struct attribute_group hns3_pmu_events_group = { |
652 | .name = "events" , |
653 | .attrs = hns3_pmu_events_attr, |
654 | }; |
655 | |
656 | static struct attribute_group hns3_pmu_filter_mode_group = { |
657 | .name = "filtermode" , |
658 | .attrs = hns3_pmu_filter_mode_attr, |
659 | }; |
660 | |
661 | static struct attribute *hns3_pmu_format_attr[] = { |
	HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"),
	HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"),
	HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"),
	HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
	HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
	HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
	HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
	HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
	HNS3_PMU_FORMAT_ATTR(global, "config1:52"),
671 | NULL |
672 | }; |
673 | |
674 | static struct attribute_group hns3_pmu_format_group = { |
675 | .name = "format" , |
676 | .attrs = hns3_pmu_format_attr, |
677 | }; |
678 | |
679 | static struct attribute *hns3_pmu_cpumask_attrs[] = { |
680 | &dev_attr_cpumask.attr, |
681 | NULL |
682 | }; |
683 | |
684 | static struct attribute_group hns3_pmu_cpumask_attr_group = { |
685 | .attrs = hns3_pmu_cpumask_attrs, |
686 | }; |
687 | |
688 | static struct attribute *hns3_pmu_identifier_attrs[] = { |
689 | &dev_attr_identifier.attr, |
690 | NULL |
691 | }; |
692 | |
693 | static struct attribute_group hns3_pmu_identifier_attr_group = { |
694 | .attrs = hns3_pmu_identifier_attrs, |
695 | }; |
696 | |
697 | static struct attribute *hns3_pmu_bdf_range_attrs[] = { |
698 | &dev_attr_bdf_min.attr, |
699 | &dev_attr_bdf_max.attr, |
700 | NULL |
701 | }; |
702 | |
703 | static struct attribute_group hns3_pmu_bdf_range_attr_group = { |
704 | .attrs = hns3_pmu_bdf_range_attrs, |
705 | }; |
706 | |
707 | static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = { |
708 | &dev_attr_hw_clk_freq.attr, |
709 | NULL |
710 | }; |
711 | |
712 | static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = { |
713 | .attrs = hns3_pmu_hw_clk_freq_attrs, |
714 | }; |
715 | |
716 | static const struct attribute_group *hns3_pmu_attr_groups[] = { |
717 | &hns3_pmu_events_group, |
718 | &hns3_pmu_filter_mode_group, |
719 | &hns3_pmu_format_group, |
720 | &hns3_pmu_cpumask_attr_group, |
721 | &hns3_pmu_identifier_attr_group, |
722 | &hns3_pmu_bdf_range_attr_group, |
723 | &hns3_pmu_hw_clk_freq_attr_group, |
724 | NULL |
725 | }; |
726 | |
727 | static u32 hns3_pmu_get_event(struct perf_event *event) |
728 | { |
729 | return hns3_pmu_get_ext_counter_used(event) << 16 | |
730 | hns3_pmu_get_event_type(event) << 8 | |
731 | hns3_pmu_get_subevent(event); |
732 | } |
733 | |
734 | static u32 hns3_pmu_get_real_event(struct perf_event *event) |
735 | { |
736 | return hns3_pmu_get_event_type(event) << 8 | |
737 | hns3_pmu_get_subevent(event); |
738 | } |
739 | |
740 | static u32 hns3_pmu_get_offset(u32 offset, u32 idx) |
741 | { |
742 | return offset + HNS3_PMU_REG_EVENT_OFFSET + |
743 | HNS3_PMU_REG_EVENT_SIZE * idx; |
744 | } |
745 | |
static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	return readl(hns3_pmu->base + offset);
}

static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
			    u32 val)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	writel(val, hns3_pmu->base + offset);
}

static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	return readq(hns3_pmu->base + offset);
}

static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
			    u64 val)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	writeq(val, hns3_pmu->base + offset);
}
775 | |
776 | static bool hns3_pmu_cmp_event(struct perf_event *target, |
777 | struct perf_event *event) |
778 | { |
	return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event);
780 | } |
781 | |
782 | static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu, |
783 | struct perf_event *event) |
784 | { |
785 | struct perf_event *sibling; |
786 | int hw_event_used = 0; |
787 | int idx; |
788 | |
789 | for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { |
790 | sibling = hns3_pmu->hw_events[idx]; |
791 | if (!sibling) |
792 | continue; |
793 | |
794 | hw_event_used++; |
795 | |
		if (!hns3_pmu_cmp_event(sibling, event))
			continue;

		/* Related event is used in the same group */
		if (sibling->group_leader == event->group_leader)
801 | return idx; |
802 | } |
803 | |
804 | /* No related event and all hardware events are used up */ |
805 | if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS) |
806 | return -EBUSY; |
807 | |
	/* No related event and there are spare hardware events to use */
809 | return -ENOENT; |
810 | } |
811 | |
812 | static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu) |
813 | { |
814 | int idx; |
815 | |
816 | for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { |
817 | if (!hns3_pmu->hw_events[idx]) |
818 | return idx; |
819 | } |
820 | |
821 | return -EBUSY; |
822 | } |
823 | |
824 | static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf) |
825 | { |
826 | struct pci_dev *pdev; |
827 | |
828 | if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) { |
829 | pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n" , bdf); |
830 | return false; |
831 | } |
832 | |
	pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
					   PCI_BUS_NUM(bdf),
					   GET_PCI_DEVFN(bdf));
836 | if (!pdev) { |
837 | pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n" , bdf); |
838 | return false; |
839 | } |
840 | |
	pci_dev_put(pdev);
842 | return true; |
843 | } |
844 | |
845 | static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, |
846 | u16 queue) |
847 | { |
848 | u32 val; |
849 | |
850 | val = GET_PCI_DEVFN(bdf); |
851 | val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S; |
852 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val); |
853 | } |
854 | |
855 | static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx) |
856 | { |
857 | bool queue_id_valid = false; |
858 | u32 reg_qid_ctrl, val; |
859 | int err; |
860 | |
861 | /* enable queue id request */ |
862 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, |
863 | HNS3_PMU_QID_CTRL_REQ_ENABLE); |
864 | |
865 | reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx); |
866 | err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val, |
867 | val & HNS3_PMU_QID_CTRL_DONE, 1, 1000); |
868 | if (err == -ETIMEDOUT) { |
869 | pci_err(hns3_pmu->pdev, "QID request timeout!\n" ); |
870 | goto out; |
871 | } |
872 | |
873 | queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS); |
874 | |
875 | out: |
876 | /* disable qid request and clear status */ |
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);
878 | |
879 | return queue_id_valid; |
880 | } |
881 | |
882 | static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, |
883 | u16 queue) |
884 | { |
885 | hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue); |
886 | |
887 | return hns3_pmu_qid_req_start(hns3_pmu, idx); |
888 | } |
889 | |
890 | static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event) |
891 | { |
892 | struct hns3_pmu_event_attr *pmu_event; |
893 | struct dev_ext_attribute *eattr; |
894 | struct device_attribute *dattr; |
895 | struct attribute *attr; |
896 | u32 i; |
897 | |
898 | for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) { |
899 | attr = hns3_pmu_events_attr[i]; |
900 | dattr = container_of(attr, struct device_attribute, attr); |
901 | eattr = container_of(dattr, struct dev_ext_attribute, attr); |
902 | pmu_event = eattr->var; |
903 | |
904 | if (event == pmu_event->event) |
905 | return pmu_event; |
906 | } |
907 | |
908 | return NULL; |
909 | } |
910 | |
911 | static int hns3_pmu_set_func_mode(struct perf_event *event, |
912 | struct hns3_pmu *hns3_pmu) |
913 | { |
914 | struct hw_perf_event *hwc = &event->hw; |
915 | u16 bdf = hns3_pmu_get_bdf(event); |
916 | |
917 | if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) |
918 | return -ENOENT; |
919 | |
920 | HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC); |
921 | |
922 | return 0; |
923 | } |
924 | |
925 | static int hns3_pmu_set_func_queue_mode(struct perf_event *event, |
926 | struct hns3_pmu *hns3_pmu) |
927 | { |
928 | u16 queue_id = hns3_pmu_get_queue(event); |
929 | struct hw_perf_event *hwc = &event->hw; |
930 | u16 bdf = hns3_pmu_get_bdf(event); |
931 | |
932 | if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) |
933 | return -ENOENT; |
934 | |
	if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
		pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
937 | return -ENOENT; |
938 | } |
939 | |
940 | HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE); |
941 | |
942 | return 0; |
943 | } |
944 | |
945 | static bool |
946 | hns3_pmu_is_enabled_global_mode(struct perf_event *event, |
947 | struct hns3_pmu_event_attr *pmu_event) |
948 | { |
949 | u8 global = hns3_pmu_get_global(event); |
950 | |
951 | if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)) |
952 | return false; |
953 | |
954 | return global; |
955 | } |
956 | |
957 | static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event, |
958 | struct hns3_pmu_event_attr *pmu_event) |
959 | { |
960 | u16 queue_id = hns3_pmu_get_queue(event); |
961 | u16 bdf = hns3_pmu_get_bdf(event); |
962 | |
963 | if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)) |
964 | return false; |
965 | else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE) |
966 | return false; |
967 | |
968 | return bdf; |
969 | } |
970 | |
971 | static bool |
972 | hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event, |
973 | struct hns3_pmu_event_attr *pmu_event) |
974 | { |
975 | u16 queue_id = hns3_pmu_get_queue(event); |
976 | u16 bdf = hns3_pmu_get_bdf(event); |
977 | |
978 | if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)) |
979 | return false; |
980 | else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE) |
981 | return false; |
982 | |
983 | return bdf; |
984 | } |
985 | |
986 | static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event, |
987 | struct hns3_pmu_event_attr *pmu_event) |
988 | { |
989 | u8 tc_id = hns3_pmu_get_tc(event); |
990 | |
991 | if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)) |
992 | return false; |
993 | |
994 | return tc_id == HNS3_PMU_FILTER_ALL_TC; |
995 | } |
996 | |
997 | static bool |
998 | hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event, |
999 | struct hns3_pmu_event_attr *pmu_event) |
1000 | { |
1001 | u8 tc_id = hns3_pmu_get_tc(event); |
1002 | |
1003 | if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)) |
1004 | return false; |
1005 | |
1006 | return tc_id != HNS3_PMU_FILTER_ALL_TC; |
1007 | } |
1008 | |
1009 | static bool |
1010 | hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event, |
1011 | struct hns3_pmu *hns3_pmu, |
1012 | struct hns3_pmu_event_attr *pmu_event) |
1013 | { |
1014 | u16 bdf = hns3_pmu_get_bdf(event); |
1015 | |
1016 | if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)) |
1017 | return false; |
1018 | |
1019 | return hns3_pmu_valid_bdf(hns3_pmu, bdf); |
1020 | } |
1021 | |
1022 | static int hns3_pmu_select_filter_mode(struct perf_event *event, |
1023 | struct hns3_pmu *hns3_pmu) |
1024 | { |
1025 | u32 event_id = hns3_pmu_get_event(event); |
1026 | struct hw_perf_event *hwc = &event->hw; |
1027 | struct hns3_pmu_event_attr *pmu_event; |
1028 | |
	pmu_event = hns3_pmu_get_pmu_event(event_id);
1030 | if (!pmu_event) { |
1031 | pci_err(hns3_pmu->pdev, "Invalid pmu event\n" ); |
1032 | return -ENOENT; |
1033 | } |
1034 | |
1035 | if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) { |
1036 | HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL); |
1037 | return 0; |
1038 | } |
1039 | |
1040 | if (hns3_pmu_is_enabled_func_mode(event, pmu_event)) |
1041 | return hns3_pmu_set_func_mode(event, hns3_pmu); |
1042 | |
1043 | if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event)) |
1044 | return hns3_pmu_set_func_queue_mode(event, hns3_pmu); |
1045 | |
1046 | if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) { |
1047 | HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT); |
1048 | return 0; |
1049 | } |
1050 | |
1051 | if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) { |
1052 | HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC); |
1053 | return 0; |
1054 | } |
1055 | |
1056 | if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) { |
1057 | HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR); |
1058 | return 0; |
1059 | } |
1060 | |
1061 | return -ENOENT; |
1062 | } |
1063 | |
1064 | static bool hns3_pmu_validate_event_group(struct perf_event *event) |
1065 | { |
1066 | struct perf_event *sibling, *leader = event->group_leader; |
1067 | struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS]; |
1068 | int counters = 1; |
1069 | int num; |
1070 | |
1071 | event_group[0] = leader; |
	if (!is_software_event(leader)) {
1073 | if (leader->pmu != event->pmu) |
1074 | return false; |
1075 | |
		if (leader != event && !hns3_pmu_cmp_event(leader, event))
1077 | event_group[counters++] = event; |
1078 | } |
1079 | |
1080 | for_each_sibling_event(sibling, event->group_leader) { |
		if (is_software_event(sibling))
1082 | continue; |
1083 | |
1084 | if (sibling->pmu != event->pmu) |
1085 | return false; |
1086 | |
1087 | for (num = 0; num < counters; num++) { |
			if (hns3_pmu_cmp_event(event_group[num], sibling))
1089 | break; |
1090 | } |
1091 | |
1092 | if (num == counters) |
1093 | event_group[counters++] = sibling; |
1094 | } |
1095 | |
1096 | return counters <= HNS3_PMU_MAX_HW_EVENTS; |
1097 | } |
1098 | |
1099 | static u32 hns3_pmu_get_filter_condition(struct perf_event *event) |
1100 | { |
1101 | struct hw_perf_event *hwc = &event->hw; |
1102 | u16 intr_id = hns3_pmu_get_intr(event); |
1103 | u8 port_id = hns3_pmu_get_port(event); |
1104 | u16 bdf = hns3_pmu_get_bdf(event); |
1105 | u8 tc_id = hns3_pmu_get_tc(event); |
1106 | u8 filter_mode; |
1107 | |
1108 | filter_mode = *(u8 *)hwc->addr_filters; |
1109 | switch (filter_mode) { |
1110 | case HNS3_PMU_HW_FILTER_PORT: |
1111 | return FILTER_CONDITION_PORT(port_id); |
1112 | case HNS3_PMU_HW_FILTER_PORT_TC: |
1113 | return FILTER_CONDITION_PORT_TC(port_id, tc_id); |
1114 | case HNS3_PMU_HW_FILTER_FUNC: |
1115 | case HNS3_PMU_HW_FILTER_FUNC_QUEUE: |
1116 | return GET_PCI_DEVFN(bdf); |
1117 | case HNS3_PMU_HW_FILTER_FUNC_INTR: |
1118 | return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id); |
1119 | default: |
1120 | break; |
1121 | } |
1122 | |
1123 | return 0; |
1124 | } |
1125 | |
1126 | static void hns3_pmu_config_filter(struct perf_event *event) |
1127 | { |
1128 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1129 | u8 event_type = hns3_pmu_get_event_type(event); |
1130 | u8 subevent_id = hns3_pmu_get_subevent(event); |
1131 | u16 queue_id = hns3_pmu_get_queue(event); |
1132 | struct hw_perf_event *hwc = &event->hw; |
1133 | u8 filter_mode = *(u8 *)hwc->addr_filters; |
1134 | u16 bdf = hns3_pmu_get_bdf(event); |
1135 | u32 idx = hwc->idx; |
1136 | u32 val; |
1137 | |
1138 | val = event_type; |
1139 | val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S; |
1140 | val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S; |
1141 | val |= HNS3_PMU_EVENT_OVERFLOW_RESTART; |
1142 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); |
1143 | |
1144 | val = hns3_pmu_get_filter_condition(event); |
1145 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val); |
1146 | |
1147 | if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE) |
		hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
1149 | } |
1150 | |
1151 | static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu, |
1152 | struct hw_perf_event *hwc) |
1153 | { |
1154 | u32 idx = hwc->idx; |
1155 | u32 val; |
1156 | |
1157 | val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); |
1158 | val |= HNS3_PMU_EVENT_EN; |
1159 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); |
1160 | } |
1161 | |
1162 | static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu, |
1163 | struct hw_perf_event *hwc) |
1164 | { |
1165 | u32 idx = hwc->idx; |
1166 | u32 val; |
1167 | |
1168 | val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); |
1169 | val &= ~HNS3_PMU_EVENT_EN; |
1170 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); |
1171 | } |
1172 | |
1173 | static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu, |
1174 | struct hw_perf_event *hwc) |
1175 | { |
1176 | u32 idx = hwc->idx; |
1177 | u32 val; |
1178 | |
1179 | val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); |
1180 | val &= ~HNS3_PMU_INTR_MASK_OVERFLOW; |
1181 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); |
1182 | } |
1183 | |
1184 | static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu, |
1185 | struct hw_perf_event *hwc) |
1186 | { |
1187 | u32 idx = hwc->idx; |
1188 | u32 val; |
1189 | |
1190 | val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); |
1191 | val |= HNS3_PMU_INTR_MASK_OVERFLOW; |
1192 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); |
1193 | } |
1194 | |
1195 | static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx) |
1196 | { |
1197 | u32 val; |
1198 | |
1199 | val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); |
1200 | val |= HNS3_PMU_EVENT_STATUS_RESET; |
1201 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); |
1202 | |
1203 | val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); |
1204 | val &= ~HNS3_PMU_EVENT_STATUS_RESET; |
1205 | hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); |
1206 | } |
1207 | |
1208 | static u64 hns3_pmu_read_counter(struct perf_event *event) |
1209 | { |
1210 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1211 | |
	return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx);
1213 | } |
1214 | |
1215 | static void hns3_pmu_write_counter(struct perf_event *event, u64 value) |
1216 | { |
1217 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1218 | u32 idx = event->hw.idx; |
1219 | |
	hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value);
	hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value);
1222 | } |
1223 | |
1224 | static void hns3_pmu_init_counter(struct perf_event *event) |
1225 | { |
1226 | struct hw_perf_event *hwc = &event->hw; |
1227 | |
1228 | local64_set(&hwc->prev_count, 0); |
	hns3_pmu_write_counter(event, 0);
1230 | } |
1231 | |
1232 | static int hns3_pmu_event_init(struct perf_event *event) |
1233 | { |
1234 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1235 | struct hw_perf_event *hwc = &event->hw; |
1236 | int idx; |
1237 | int ret; |
1238 | |
1239 | if (event->attr.type != event->pmu->type) |
1240 | return -ENOENT; |
1241 | |
1242 | /* Sampling is not supported */ |
1243 | if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) |
1244 | return -EOPNOTSUPP; |
1245 | |
1246 | event->cpu = hns3_pmu->on_cpu; |
1247 | |
1248 | idx = hns3_pmu_get_event_idx(hns3_pmu); |
1249 | if (idx < 0) { |
1250 | pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n" , |
1251 | HNS3_PMU_MAX_HW_EVENTS); |
1252 | return -EBUSY; |
1253 | } |
1254 | |
1255 | hwc->idx = idx; |
1256 | |
1257 | ret = hns3_pmu_select_filter_mode(event, hns3_pmu); |
1258 | if (ret) { |
1259 | pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n" , ret); |
1260 | return ret; |
1261 | } |
1262 | |
1263 | if (!hns3_pmu_validate_event_group(event)) { |
1264 | pci_err(hns3_pmu->pdev, "Invalid event group.\n" ); |
1265 | return -EINVAL; |
1266 | } |
1267 | |
1268 | if (hns3_pmu_get_ext_counter_used(event)) |
1269 | hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER; |
1270 | else |
1271 | hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER; |
1272 | |
1273 | return 0; |
1274 | } |
1275 | |
1276 | static void hns3_pmu_read(struct perf_event *event) |
1277 | { |
1278 | struct hw_perf_event *hwc = &event->hw; |
1279 | u64 new_cnt, prev_cnt, delta; |
1280 | |
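	/*
	 * Snapshot the free-running counter and retry if prev_count was
	 * updated concurrently, so only the delta since the previous read
	 * is accumulated into the event count.
	 */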
1281 | do { |
1282 | prev_cnt = local64_read(&hwc->prev_count); |
1283 | new_cnt = hns3_pmu_read_counter(event); |
	} while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
		 prev_cnt);
1286 | |
1287 | delta = new_cnt - prev_cnt; |
1288 | local64_add(delta, &event->count); |
1289 | } |
1290 | |
1291 | static void hns3_pmu_start(struct perf_event *event, int flags) |
1292 | { |
1293 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1294 | struct hw_perf_event *hwc = &event->hw; |
1295 | |
1296 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) |
1297 | return; |
1298 | |
1299 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); |
1300 | hwc->state = 0; |
1301 | |
1302 | hns3_pmu_config_filter(event); |
1303 | hns3_pmu_init_counter(event); |
1304 | hns3_pmu_enable_intr(hns3_pmu, hwc); |
1305 | hns3_pmu_enable_counter(hns3_pmu, hwc); |
1306 | |
1307 | perf_event_update_userpage(event); |
1308 | } |
1309 | |
1310 | static void hns3_pmu_stop(struct perf_event *event, int flags) |
1311 | { |
1312 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1313 | struct hw_perf_event *hwc = &event->hw; |
1314 | |
1315 | hns3_pmu_disable_counter(hns3_pmu, hwc); |
1316 | hns3_pmu_disable_intr(hns3_pmu, hwc); |
1317 | |
1318 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); |
1319 | hwc->state |= PERF_HES_STOPPED; |
1320 | |
1321 | if (hwc->state & PERF_HES_UPTODATE) |
1322 | return; |
1323 | |
1324 | /* Read hardware counter and update the perf counter statistics */ |
1325 | hns3_pmu_read(event); |
1326 | hwc->state |= PERF_HES_UPTODATE; |
1327 | } |
1328 | |
1329 | static int hns3_pmu_add(struct perf_event *event, int flags) |
1330 | { |
1331 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1332 | struct hw_perf_event *hwc = &event->hw; |
1333 | int idx; |
1334 | |
1335 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
1336 | |
1337 | /* Check all working events to find a related event. */ |
1338 | idx = hns3_pmu_find_related_event_idx(hns3_pmu, event); |
1339 | if (idx < 0 && idx != -ENOENT) |
1340 | return idx; |
1341 | |
1342 | /* Current event shares an enabled hardware event with related event */ |
1343 | if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) { |
1344 | hwc->idx = idx; |
1345 | goto start_count; |
1346 | } |
1347 | |
1348 | idx = hns3_pmu_get_event_idx(hns3_pmu); |
1349 | if (idx < 0) |
1350 | return idx; |
1351 | |
1352 | hwc->idx = idx; |
1353 | hns3_pmu->hw_events[idx] = event; |
1354 | |
1355 | start_count: |
1356 | if (flags & PERF_EF_START) |
1357 | hns3_pmu_start(event, PERF_EF_RELOAD); |
1358 | |
1359 | return 0; |
1360 | } |
1361 | |
1362 | static void hns3_pmu_del(struct perf_event *event, int flags) |
1363 | { |
1364 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); |
1365 | struct hw_perf_event *hwc = &event->hw; |
1366 | |
1367 | hns3_pmu_stop(event, PERF_EF_UPDATE); |
1368 | hns3_pmu->hw_events[hwc->idx] = NULL; |
1369 | perf_event_update_userpage(event); |
1370 | } |
1371 | |
1372 | static void hns3_pmu_enable(struct pmu *pmu) |
1373 | { |
1374 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); |
1375 | u32 val; |
1376 | |
	val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
	val |= HNS3_PMU_GLOBAL_START;
	writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
1380 | } |
1381 | |
1382 | static void hns3_pmu_disable(struct pmu *pmu) |
1383 | { |
1384 | struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); |
1385 | u32 val; |
1386 | |
	val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
	val &= ~HNS3_PMU_GLOBAL_START;
	writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
1390 | } |
1391 | |
1392 | static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) |
1393 | { |
1394 | u16 device_id; |
1395 | char *name; |
1396 | u32 val; |
1397 | |
1398 | hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2]; |
1399 | if (!hns3_pmu->base) { |
1400 | pci_err(pdev, "ioremap failed\n" ); |
1401 | return -ENOMEM; |
1402 | } |
1403 | |
	hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);

	val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
	hns3_pmu->bdf_min = val & 0xffff;
	hns3_pmu->bdf_max = val >> 16;

	val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
	device_id = val & 0xffff;
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id);
1413 | if (!name) |
1414 | return -ENOMEM; |
1415 | |
1416 | hns3_pmu->pdev = pdev; |
1417 | hns3_pmu->on_cpu = -1; |
	hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
1419 | hns3_pmu->pmu = (struct pmu) { |
1420 | .name = name, |
1421 | .module = THIS_MODULE, |
1422 | .event_init = hns3_pmu_event_init, |
1423 | .pmu_enable = hns3_pmu_enable, |
1424 | .pmu_disable = hns3_pmu_disable, |
1425 | .add = hns3_pmu_add, |
1426 | .del = hns3_pmu_del, |
1427 | .start = hns3_pmu_start, |
1428 | .stop = hns3_pmu_stop, |
1429 | .read = hns3_pmu_read, |
1430 | .task_ctx_nr = perf_invalid_context, |
1431 | .attr_groups = hns3_pmu_attr_groups, |
1432 | .capabilities = PERF_PMU_CAP_NO_EXCLUDE, |
1433 | }; |
1434 | |
1435 | return 0; |
1436 | } |
1437 | |
1438 | static irqreturn_t hns3_pmu_irq(int irq, void *data) |
1439 | { |
1440 | struct hns3_pmu *hns3_pmu = data; |
1441 | u32 intr_status, idx; |
1442 | |
1443 | for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { |
1444 | intr_status = hns3_pmu_readl(hns3_pmu, |
1445 | HNS3_PMU_REG_EVENT_INTR_STATUS, |
1446 | idx); |
1447 | |
1448 | /* |
1449 | * As each counter will restart from 0 when it is overflowed, |
1450 | * extra processing is no need, just clear interrupt status. |
1451 | */ |
1452 | if (intr_status) |
1453 | hns3_pmu_clear_intr_status(hns3_pmu, idx); |
1454 | } |
1455 | |
1456 | return IRQ_HANDLED; |
1457 | } |
1458 | |
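/*
 * CPU hotplug callback: if no CPU owns this PMU yet, adopt the incoming
 * CPU and route the overflow interrupt to it.
 */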
1459 | static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) |
1460 | { |
1461 | struct hns3_pmu *hns3_pmu; |
1462 | |
1463 | hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); |
1464 | if (!hns3_pmu) |
1465 | return -ENODEV; |
1466 | |
1467 | if (hns3_pmu->on_cpu == -1) { |
1468 | hns3_pmu->on_cpu = cpu; |
		irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
1470 | } |
1471 | |
1472 | return 0; |
1473 | } |
1474 | |
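/*
 * CPU hotplug callback: when the owning CPU goes offline, migrate the perf
 * context and the overflow interrupt to another online CPU.
 */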
1475 | static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) |
1476 | { |
1477 | struct hns3_pmu *hns3_pmu; |
1478 | unsigned int target; |
1479 | |
1480 | hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); |
1481 | if (!hns3_pmu) |
1482 | return -ENODEV; |
1483 | |
1484 | /* Nothing to do if this CPU doesn't own the PMU */ |
1485 | if (hns3_pmu->on_cpu != cpu) |
1486 | return 0; |
1487 | |
	/* Choose a new CPU from the remaining online CPUs */
1489 | target = cpumask_any_but(cpu_online_mask, cpu); |
1490 | if (target >= nr_cpu_ids) |
1491 | return 0; |
1492 | |
	perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
	hns3_pmu->on_cpu = target;
	irq_set_affinity(hns3_pmu->irq, cpumask_of(target));
1496 | |
1497 | return 0; |
1498 | } |
1499 | |
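/* devm action to release the MSI vectors allocated at probe time. */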
1500 | static void hns3_pmu_free_irq(void *data) |
1501 | { |
1502 | struct pci_dev *pdev = data; |
1503 | |
	pci_free_irq_vectors(pdev);
1505 | } |
1506 | |
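/*
 * Allocate a single MSI vector for the overflow interrupt and request it
 * with device-managed lifetime; the vector is released by the devm action
 * registered below.
 */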
1507 | static int hns3_pmu_irq_register(struct pci_dev *pdev, |
1508 | struct hns3_pmu *hns3_pmu) |
1509 | { |
1510 | int irq, ret; |
1511 | |
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
1515 | return ret; |
1516 | } |
1517 | |
1518 | ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev); |
1519 | if (ret) { |
		pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
1521 | return ret; |
1522 | } |
1523 | |
	irq = pci_irq_vector(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0,
			       hns3_pmu->pmu.name, hns3_pmu);
	if (ret) {
		pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
1529 | return ret; |
1530 | } |
1531 | |
1532 | hns3_pmu->irq = irq; |
1533 | |
1534 | return 0; |
1535 | } |
1536 | |
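/*
 * Bring the PMU up: map the registers and fill in the pmu callbacks,
 * register the overflow interrupt, hook into CPU hotplug and finally
 * register with the perf core. The hotplug instance is unwound if perf
 * registration fails.
 */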
1537 | static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) |
1538 | { |
1539 | int ret; |
1540 | |
1541 | ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu); |
1542 | if (ret) |
1543 | return ret; |
1544 | |
1545 | ret = hns3_pmu_irq_register(pdev, hns3_pmu); |
1546 | if (ret) |
1547 | return ret; |
1548 | |
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
				       &hns3_pmu->node);
	if (ret) {
		pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
1553 | return ret; |
1554 | } |
1555 | |
	ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
	if (ret) {
		pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
						    &hns3_pmu->node);
1561 | } |
1562 | |
1563 | return ret; |
1564 | } |
1565 | |
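/* Tear down in reverse order of hns3_pmu_init_pmu(). */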
1566 | static void hns3_pmu_uninit_pmu(struct pci_dev *pdev) |
1567 | { |
1568 | struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev); |
1569 | |
	perf_pmu_unregister(&hns3_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
					    &hns3_pmu->node);
1573 | } |
1574 | |
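/*
 * Enable the PCI device, claim and map BAR 2 (device-managed), and enable
 * bus mastering so the device can raise MSIs.
 */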
1575 | static int hns3_pmu_init_dev(struct pci_dev *pdev) |
1576 | { |
1577 | int ret; |
1578 | |
1579 | ret = pcim_enable_device(pdev); |
1580 | if (ret) { |
		pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
1582 | return ret; |
1583 | } |
1584 | |
	ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu");
	if (ret < 0) {
		pci_err(pdev, "failed to request pci region, ret = %d.\n", ret);
1588 | return ret; |
1589 | } |
1590 | |
	pci_set_master(pdev);
1592 | |
1593 | return 0; |
1594 | } |
1595 | |
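/*
 * Probe path: allocate the per-device context, initialize the PCI device
 * and the PMU, then stash the context in drvdata for remove().
 */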
1596 | static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
1597 | { |
1598 | struct hns3_pmu *hns3_pmu; |
1599 | int ret; |
1600 | |
	hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
1602 | if (!hns3_pmu) |
1603 | return -ENOMEM; |
1604 | |
1605 | ret = hns3_pmu_init_dev(pdev); |
1606 | if (ret) |
1607 | return ret; |
1608 | |
1609 | ret = hns3_pmu_init_pmu(pdev, hns3_pmu); |
1610 | if (ret) { |
		pci_clear_master(pdev);
1612 | return ret; |
1613 | } |
1614 | |
	pci_set_drvdata(pdev, hns3_pmu);
1616 | |
1617 | return ret; |
1618 | } |
1619 | |
1620 | static void hns3_pmu_remove(struct pci_dev *pdev) |
1621 | { |
1622 | hns3_pmu_uninit_pmu(pdev); |
	pci_clear_master(pdev);
1624 | pci_set_drvdata(pdev, NULL); |
1625 | } |
1626 | |
1627 | static const struct pci_device_id hns3_pmu_ids[] = { |
1628 | { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) }, |
1629 | { 0, } |
1630 | }; |
1631 | MODULE_DEVICE_TABLE(pci, hns3_pmu_ids); |
1632 | |
1633 | static struct pci_driver hns3_pmu_driver = { |
	.name = "hns3_pmu",
1635 | .id_table = hns3_pmu_ids, |
1636 | .probe = hns3_pmu_probe, |
1637 | .remove = hns3_pmu_remove, |
1638 | }; |
1639 | |
1640 | static int __init hns3_pmu_module_init(void) |
1641 | { |
1642 | int ret; |
1643 | |
	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
				      "AP_PERF_ARM_HNS3_PMU_ONLINE",
				      hns3_pmu_online_cpu,
				      hns3_pmu_offline_cpu);
	if (ret) {
		pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
1650 | return ret; |
1651 | } |
1652 | |
1653 | ret = pci_register_driver(&hns3_pmu_driver); |
1654 | if (ret) { |
		pr_err("failed to register pci driver, ret = %d.\n", ret);
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
1657 | } |
1658 | |
1659 | return ret; |
1660 | } |
1661 | module_init(hns3_pmu_module_init); |
1662 | |
1663 | static void __exit hns3_pmu_module_exit(void) |
1664 | { |
	pci_unregister_driver(&hns3_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
1667 | } |
1668 | module_exit(hns3_pmu_module_exit); |
1669 | |
MODULE_DESCRIPTION("HNS3 PMU driver");
MODULE_LICENSE("GPL v2");
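
/*
 * Illustrative usage (not part of the driver): once probed, the PMU shows
 * up under /sys/bus/event_source/devices/ with the "hns3_pmu_sicl_<id>"
 * name built in hns3_pmu_alloc_pmu() and is driven with the perf tool,
 * roughly like
 *
 *   perf stat -e 'hns3_pmu_sicl_0/<event>,<filters>/' -a -- sleep 1
 *
 * The concrete event and filter field names come from the attr_groups this
 * driver exports via sysfs and are placeholders here.
 */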
1672 | |