// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 */

/* Support for NVIDIA specific attributes. */
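/*
 * Events and filters are exposed through the standard perf_event sysfs
 * interface; for example (assuming an SCF PMU instance on socket 0):
 *
 *   perf stat -a -e nvidia_scf_pmu_0/bus_cycles/ sleep 1
 */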

#include <linux/module.h>
#include <linux/topology.h>

#include "arm_cspmu.h"

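/*
 * The filter masks below bound the per-unit filter field taken from
 * event->attr.config1; each unit implements a fixed number of ports.
 */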
#define NV_PCIE_PORT_COUNT 10ULL
#define NV_PCIE_FILTER_ID_MASK GENMASK_ULL(NV_PCIE_PORT_COUNT - 1, 0)

#define NV_NVL_C2C_PORT_COUNT 2ULL
#define NV_NVL_C2C_FILTER_ID_MASK GENMASK_ULL(NV_NVL_C2C_PORT_COUNT - 1, 0)

#define NV_CNVL_PORT_COUNT 4ULL
#define NV_CNVL_FILTER_ID_MASK GENMASK_ULL(NV_CNVL_PORT_COUNT - 1, 0)

#define NV_GENERIC_FILTER_ID_MASK GENMASK_ULL(31, 0)

#define NV_PRODID_MASK GENMASK(31, 0)

#define NV_FORMAT_NAME_GENERIC 0

#define to_nv_cspmu_ctx(cspmu) ((struct nv_cspmu_ctx *)(cspmu->impl.ctx))

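/*
 * NV_CSPMU_EVENT_ATTR_4(pref, suff, config) expands to four event attributes
 * for unit instances 0-3 with consecutive config values, e.g.
 * socket_0_rd_data (0x101) through socket_3_rd_data (0x104).
 */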
#define NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _num, _suff, _config) \
	ARM_CSPMU_EVENT_ATTR(_pref##_num##_suff, _config)

#define NV_CSPMU_EVENT_ATTR_4(_pref, _suff, _config) \
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _0_, _suff, _config), \
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _1_, _suff, _config + 1), \
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _2_, _suff, _config + 2), \
	NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _3_, _suff, _config + 3)

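/* Per-PMU-instance data resolved from the matching nv_cspmu_match entry. */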
struct nv_cspmu_ctx {
	const char *name;
	u32 filter_mask;
	u32 filter_default_val;
	struct attribute **event_attr;
	struct attribute **format_attr;
};

static struct attribute *scf_pmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(bus_cycles, 0x1d),

	ARM_CSPMU_EVENT_ATTR(scf_cache_allocate, 0xF0),
	ARM_CSPMU_EVENT_ATTR(scf_cache_refill, 0xF1),
	ARM_CSPMU_EVENT_ATTR(scf_cache, 0xF2),
	ARM_CSPMU_EVENT_ATTR(scf_cache_wb, 0xF3),

	NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101),
	NV_CSPMU_EVENT_ATTR_4(socket, dl_rsp, 0x105),
	NV_CSPMU_EVENT_ATTR_4(socket, wb_data, 0x109),
	NV_CSPMU_EVENT_ATTR_4(socket, ev_rsp, 0x10d),
	NV_CSPMU_EVENT_ATTR_4(socket, prb_data, 0x111),

	NV_CSPMU_EVENT_ATTR_4(socket, rd_outstanding, 0x115),
	NV_CSPMU_EVENT_ATTR_4(socket, dl_outstanding, 0x119),
	NV_CSPMU_EVENT_ATTR_4(socket, wb_outstanding, 0x11d),
	NV_CSPMU_EVENT_ATTR_4(socket, wr_outstanding, 0x121),
	NV_CSPMU_EVENT_ATTR_4(socket, ev_outstanding, 0x125),
	NV_CSPMU_EVENT_ATTR_4(socket, prb_outstanding, 0x129),

	NV_CSPMU_EVENT_ATTR_4(socket, rd_access, 0x12d),
	NV_CSPMU_EVENT_ATTR_4(socket, dl_access, 0x131),
	NV_CSPMU_EVENT_ATTR_4(socket, wb_access, 0x135),
	NV_CSPMU_EVENT_ATTR_4(socket, wr_access, 0x139),
	NV_CSPMU_EVENT_ATTR_4(socket, ev_access, 0x13d),
	NV_CSPMU_EVENT_ATTR_4(socket, prb_access, 0x141),

	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_data, 0x145),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_access, 0x149),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_access, 0x14d),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_outstanding, 0x151),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_outstanding, 0x155),

	NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_data, 0x159),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_access, 0x15d),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_access, 0x161),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_outstanding, 0x165),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_outstanding, 0x169),

	ARM_CSPMU_EVENT_ATTR(gmem_rd_data, 0x16d),
	ARM_CSPMU_EVENT_ATTR(gmem_rd_access, 0x16e),
	ARM_CSPMU_EVENT_ATTR(gmem_rd_outstanding, 0x16f),
	ARM_CSPMU_EVENT_ATTR(gmem_dl_rsp, 0x170),
	ARM_CSPMU_EVENT_ATTR(gmem_dl_access, 0x171),
	ARM_CSPMU_EVENT_ATTR(gmem_dl_outstanding, 0x172),
	ARM_CSPMU_EVENT_ATTR(gmem_wb_data, 0x173),
	ARM_CSPMU_EVENT_ATTR(gmem_wb_access, 0x174),
	ARM_CSPMU_EVENT_ATTR(gmem_wb_outstanding, 0x175),
	ARM_CSPMU_EVENT_ATTR(gmem_ev_rsp, 0x176),
	ARM_CSPMU_EVENT_ATTR(gmem_ev_access, 0x177),
	ARM_CSPMU_EVENT_ATTR(gmem_ev_outstanding, 0x178),
	ARM_CSPMU_EVENT_ATTR(gmem_wr_data, 0x179),
	ARM_CSPMU_EVENT_ATTR(gmem_wr_outstanding, 0x17a),
	ARM_CSPMU_EVENT_ATTR(gmem_wr_access, 0x17b),

	NV_CSPMU_EVENT_ATTR_4(socket, wr_data, 0x17c),

	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_data, 0x180),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_data, 0x184),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_access, 0x188),
	NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_outstanding, 0x18c),

	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_data, 0x190),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_data, 0x194),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_access, 0x198),
	NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_outstanding, 0x19c),

	ARM_CSPMU_EVENT_ATTR(gmem_wr_total_bytes, 0x1a0),
	ARM_CSPMU_EVENT_ATTR(remote_socket_wr_total_bytes, 0x1a1),
	ARM_CSPMU_EVENT_ATTR(remote_socket_rd_data, 0x1a2),
	ARM_CSPMU_EVENT_ATTR(remote_socket_rd_outstanding, 0x1a3),
	ARM_CSPMU_EVENT_ATTR(remote_socket_rd_access, 0x1a4),

	ARM_CSPMU_EVENT_ATTR(cmem_rd_data, 0x1a5),
	ARM_CSPMU_EVENT_ATTR(cmem_rd_access, 0x1a6),
	ARM_CSPMU_EVENT_ATTR(cmem_rd_outstanding, 0x1a7),
	ARM_CSPMU_EVENT_ATTR(cmem_dl_rsp, 0x1a8),
	ARM_CSPMU_EVENT_ATTR(cmem_dl_access, 0x1a9),
	ARM_CSPMU_EVENT_ATTR(cmem_dl_outstanding, 0x1aa),
	ARM_CSPMU_EVENT_ATTR(cmem_wb_data, 0x1ab),
	ARM_CSPMU_EVENT_ATTR(cmem_wb_access, 0x1ac),
	ARM_CSPMU_EVENT_ATTR(cmem_wb_outstanding, 0x1ad),
	ARM_CSPMU_EVENT_ATTR(cmem_ev_rsp, 0x1ae),
	ARM_CSPMU_EVENT_ATTR(cmem_ev_access, 0x1af),
	ARM_CSPMU_EVENT_ATTR(cmem_ev_outstanding, 0x1b0),
	ARM_CSPMU_EVENT_ATTR(cmem_wr_data, 0x1b1),
	ARM_CSPMU_EVENT_ATTR(cmem_wr_outstanding, 0x1b2),

	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_data, 0x1b3),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_access, 0x1b7),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_access, 0x1bb),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_outstanding, 0x1bf),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_outstanding, 0x1c3),

	ARM_CSPMU_EVENT_ATTR(ocu_prb_access, 0x1c7),
	ARM_CSPMU_EVENT_ATTR(ocu_prb_data, 0x1c8),
	ARM_CSPMU_EVENT_ATTR(ocu_prb_outstanding, 0x1c9),

	ARM_CSPMU_EVENT_ATTR(cmem_wr_access, 0x1ca),

	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_access, 0x1cb),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_data, 0x1cf),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_data, 0x1d3),
	NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_outstanding, 0x1d7),

	ARM_CSPMU_EVENT_ATTR(cmem_wr_total_bytes, 0x1db),

	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

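/*
 * Traffic events shared by the PCIe, NVLink-C2C and CNVLink PMUs (see the
 * nv_cspmu_match table below).
 */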
static struct attribute *mcf_pmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(rd_bytes_loc, 0x0),
	ARM_CSPMU_EVENT_ATTR(rd_bytes_rem, 0x1),
	ARM_CSPMU_EVENT_ATTR(wr_bytes_loc, 0x2),
	ARM_CSPMU_EVENT_ATTR(wr_bytes_rem, 0x3),
	ARM_CSPMU_EVENT_ATTR(total_bytes_loc, 0x4),
	ARM_CSPMU_EVENT_ATTR(total_bytes_rem, 0x5),
	ARM_CSPMU_EVENT_ATTR(rd_req_loc, 0x6),
	ARM_CSPMU_EVENT_ATTR(rd_req_rem, 0x7),
	ARM_CSPMU_EVENT_ATTR(wr_req_loc, 0x8),
	ARM_CSPMU_EVENT_ATTR(wr_req_rem, 0x9),
	ARM_CSPMU_EVENT_ATTR(total_req_loc, 0xa),
	ARM_CSPMU_EVENT_ATTR(total_req_rem, 0xb),
	ARM_CSPMU_EVENT_ATTR(rd_cum_outs_loc, 0xc),
	ARM_CSPMU_EVENT_ATTR(rd_cum_outs_rem, 0xd),
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

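/* Fallback event list for units without a product-specific match entry. */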
static struct attribute *generic_pmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

static struct attribute *scf_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	NULL,
};

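/* root_port selects which of the NV_PCIE_PORT_COUNT root ports are counted. */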
static struct attribute *pcie_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_ATTR(root_port, "config1:0-9"),
	NULL,
};

static struct attribute *nvlink_c2c_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	NULL,
};

static struct attribute *cnvlink_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_ATTR(rem_socket, "config1:0-3"),
	NULL,
};

static struct attribute *generic_pmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_FILTER_ATTR,
	NULL,
};

static struct attribute **
nv_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
	const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);

	return ctx->event_attr;
}

static struct attribute **
nv_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
	const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);

	return ctx->format_attr;
}

static const char *
nv_cspmu_get_name(const struct arm_cspmu *cspmu)
{
	const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);

	return ctx->name;
}

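/*
 * Return the filter value to program into the PMU: the config1 bits selected
 * by filter_mask, or the per-unit default when the unit has no programmable
 * filter (filter_mask == 0).
 */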
static u32 nv_cspmu_event_filter(const struct perf_event *event)
{
	const struct nv_cspmu_ctx *ctx =
		to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));

	if (ctx->filter_mask == 0)
		return ctx->filter_default_val;

	return event->attr.config1 & ctx->filter_mask;
}

enum nv_cspmu_name_fmt {
	NAME_FMT_GENERIC,
	NAME_FMT_SOCKET
};

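/*
 * Match table entry: a PMU is matched by comparing (prodid & prodid_mask)
 * against the PRODUCTID field of PMIIDR, and inherits the name pattern,
 * filter setup and attribute groups listed here.
 */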
struct nv_cspmu_match {
	u32 prodid;
	u32 prodid_mask;
	u64 filter_mask;
	u32 filter_default_val;
	const char *name_pattern;
	enum nv_cspmu_name_fmt name_fmt;
	struct attribute **event_attr;
	struct attribute **format_attr;
};

static const struct nv_cspmu_match nv_cspmu_match[] = {
	{
		.prodid = 0x103,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = NV_PCIE_FILTER_ID_MASK,
		.filter_default_val = NV_PCIE_FILTER_ID_MASK,
		.name_pattern = "nvidia_pcie_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = pcie_pmu_format_attrs
	},
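	/*
	 * The NVLink-C2C units expose no user-programmable filter
	 * (filter_mask is zero); the default value enables all ports.
	 */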
	{
		.prodid = 0x104,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = 0x0,
		.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
		.name_pattern = "nvidia_nvlink_c2c1_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = nvlink_c2c_pmu_format_attrs
	},
	{
		.prodid = 0x105,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = 0x0,
		.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
		.name_pattern = "nvidia_nvlink_c2c0_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = nvlink_c2c_pmu_format_attrs
	},
	{
		.prodid = 0x106,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = NV_CNVL_FILTER_ID_MASK,
		.filter_default_val = NV_CNVL_FILTER_ID_MASK,
		.name_pattern = "nvidia_cnvlink_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = mcf_pmu_event_attrs,
		.format_attr = cnvlink_pmu_format_attrs
	},
	{
		.prodid = 0x2CF,
		.prodid_mask = NV_PRODID_MASK,
		.filter_mask = 0x0,
		.filter_default_val = 0x0,
		.name_pattern = "nvidia_scf_pmu_%u",
		.name_fmt = NAME_FMT_SOCKET,
		.event_attr = scf_pmu_event_attrs,
		.format_attr = scf_pmu_format_attrs
	},
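	/*
	 * Catch-all entry: prodid and prodid_mask of zero match any PMU and
	 * also terminate the lookup loop in nv_cspmu_init_ops().
	 */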
	{
		.prodid = 0,
		.prodid_mask = 0,
		.filter_mask = NV_GENERIC_FILTER_ID_MASK,
		.filter_default_val = NV_GENERIC_FILTER_ID_MASK,
		.name_pattern = "nvidia_uncore_pmu_%u",
		.name_fmt = NAME_FMT_GENERIC,
		.event_attr = generic_pmu_event_attrs,
		.format_attr = generic_pmu_format_attrs
	},
};

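/*
 * Build the PMU device name: socket-scoped PMUs are suffixed with the NUMA
 * node of their associated CPUs, generic PMUs with a running index.
 */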
static char *nv_cspmu_format_name(const struct arm_cspmu *cspmu,
				  const struct nv_cspmu_match *match)
{
	char *name;
	struct device *dev = cspmu->dev;

	static atomic_t pmu_generic_idx = {0};

	switch (match->name_fmt) {
	case NAME_FMT_SOCKET: {
		const int cpu = cpumask_first(&cspmu->associated_cpus);
		const int socket = cpu_to_node(cpu);

		name = devm_kasprintf(dev, GFP_KERNEL, match->name_pattern,
				      socket);
		break;
	}
	case NAME_FMT_GENERIC:
		name = devm_kasprintf(dev, GFP_KERNEL, match->name_pattern,
				      atomic_fetch_inc(&pmu_generic_idx));
		break;
	default:
		name = NULL;
		break;
	}

	return name;
}


static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
{
	u32 prodid;
	struct nv_cspmu_ctx *ctx;
	struct device *dev = cspmu->dev;
	struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
	const struct nv_cspmu_match *match = nv_cspmu_match;

	ctx = devm_kzalloc(dev, sizeof(struct nv_cspmu_ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	prodid = FIELD_GET(ARM_CSPMU_PMIIDR_PRODUCTID, cspmu->impl.pmiidr);

	/* Find matching PMU. */
	for (; match->prodid; match++) {
		const u32 prodid_mask = match->prodid_mask;

		if ((match->prodid & prodid_mask) == (prodid & prodid_mask))
			break;
	}

	ctx->name = nv_cspmu_format_name(cspmu, match);
	ctx->filter_mask = match->filter_mask;
	ctx->filter_default_val = match->filter_default_val;
	ctx->event_attr = match->event_attr;
	ctx->format_attr = match->format_attr;

	cspmu->impl.ctx = ctx;

	/* NVIDIA specific callbacks. */
	impl_ops->event_filter = nv_cspmu_event_filter;
	impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
	impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
	impl_ops->get_name = nv_cspmu_get_name;

	return 0;
}


/* Match all NVIDIA Coresight PMU devices */
static const struct arm_cspmu_impl_match nv_cspmu_param = {
	.pmiidr_val = ARM_CSPMU_IMPL_ID_NVIDIA,
	.module = THIS_MODULE,
	.impl_init_ops = nv_cspmu_init_ops
};

static int __init nvidia_cspmu_init(void)
{
	int ret;

	ret = arm_cspmu_impl_register(&nv_cspmu_param);
	if (ret)
		pr_err("nvidia_cspmu backend registration error: %d\n", ret);

	return ret;
}

static void __exit nvidia_cspmu_exit(void)
{
	arm_cspmu_impl_unregister(&nv_cspmu_param);
}

module_init(nvidia_cspmu_init);
module_exit(nvidia_cspmu_exit);

MODULE_LICENSE("GPL v2");
