1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for HiSilicon PCIe tune and trace device
4 *
5 * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
6 * Author: Yicong Yang <yangyicong@hisilicon.com>
7 */
8
9#include <linux/bitfield.h>
10#include <linux/bitops.h>
11#include <linux/cpuhotplug.h>
12#include <linux/delay.h>
13#include <linux/dma-mapping.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/iommu.h>
17#include <linux/iopoll.h>
18#include <linux/module.h>
19#include <linux/sysfs.h>
20#include <linux/vmalloc.h>
21
22#include "hisi_ptt.h"
23
24/* Dynamic CPU hotplug state used by PTT */
25static enum cpuhp_state hisi_ptt_pmu_online;
26
27static bool hisi_ptt_wait_tuning_finish(struct hisi_ptt *hisi_ptt)
28{
29 u32 val;
30
31 return !readl_poll_timeout(hisi_ptt->iobase + HISI_PTT_TUNING_INT_STAT,
32 val, !(val & HISI_PTT_TUNING_INT_STAT_MASK),
33 HISI_PTT_WAIT_POLL_INTERVAL_US,
34 HISI_PTT_WAIT_TUNE_TIMEOUT_US);
35}
36
37static ssize_t hisi_ptt_tune_attr_show(struct device *dev,
38 struct device_attribute *attr,
39 char *buf)
40{
41 struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
42 struct dev_ext_attribute *ext_attr;
43 struct hisi_ptt_tune_desc *desc;
44 u32 reg;
45 u16 val;
46
47 ext_attr = container_of(attr, struct dev_ext_attribute, attr);
48 desc = ext_attr->var;
49
50 mutex_lock(&hisi_ptt->tune_lock);
51
52 reg = readl(addr: hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
53 reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
54 reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
55 desc->event_code);
56 writel(val: reg, addr: hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
57
58 /* Write all 1 to indicates it's the read process */
59 writel(val: ~0U, addr: hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
60
61 if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
62 mutex_unlock(lock: &hisi_ptt->tune_lock);
63 return -ETIMEDOUT;
64 }
65
66 reg = readl(addr: hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
67 reg &= HISI_PTT_TUNING_DATA_VAL_MASK;
68 val = FIELD_GET(HISI_PTT_TUNING_DATA_VAL_MASK, reg);
69
70 mutex_unlock(lock: &hisi_ptt->tune_lock);
71 return sysfs_emit(buf, fmt: "%u\n", val);
72}
73
74static ssize_t hisi_ptt_tune_attr_store(struct device *dev,
75 struct device_attribute *attr,
76 const char *buf, size_t count)
77{
78 struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
79 struct dev_ext_attribute *ext_attr;
80 struct hisi_ptt_tune_desc *desc;
81 u32 reg;
82 u16 val;
83
84 ext_attr = container_of(attr, struct dev_ext_attribute, attr);
85 desc = ext_attr->var;
86
87 if (kstrtou16(s: buf, base: 10, res: &val))
88 return -EINVAL;
89
90 mutex_lock(&hisi_ptt->tune_lock);
91
92 reg = readl(addr: hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
93 reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
94 reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
95 desc->event_code);
96 writel(val: reg, addr: hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
97 writel(FIELD_PREP(HISI_PTT_TUNING_DATA_VAL_MASK, val),
98 addr: hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
99
100 if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
101 mutex_unlock(lock: &hisi_ptt->tune_lock);
102 return -ETIMEDOUT;
103 }
104
105 mutex_unlock(lock: &hisi_ptt->tune_lock);
106 return count;
107}
108
/*
 * Declare a tuning event descriptor plus the matching sysfs attribute.
 * @_name becomes both the sysfs file name and the descriptor identifier;
 * @_val is the event code written to HISI_PTT_TUNING_CTRL.
 */
#define HISI_PTT_TUNE_ATTR(_name, _val, _show, _store) \
	static struct hisi_ptt_tune_desc _name##_desc = { \
		.name = #_name, \
		.event_code = (_val), \
	}; \
	static struct dev_ext_attribute hisi_ptt_##_name##_attr = { \
		.attr = __ATTR(_name, 0600, _show, _store), \
		.var = &_name##_desc, \
	}

/* Tuning attribute wired to the common show/store handlers above */
#define HISI_PTT_TUNE_ATTR_COMMON(_name, _val) \
	HISI_PTT_TUNE_ATTR(_name, _val, \
			   hisi_ptt_tune_attr_show, \
			   hisi_ptt_tune_attr_store)
123
/*
 * The value of the tuning event is composed of two parts: main event code
 * in BIT[0,15] and subevent code in BIT[16,23]. For example, qos_tx_cpl is
 * a subevent of 'Tx path QoS control' which is for tuning the weight of Tx
 * completion TLPs. See hisi_ptt.rst documentation for more information.
 */
/* Main event code in the low 16 bits, subevent code shifted into BIT[16,23] */
#define HISI_PTT_TUNE_QOS_TX_CPL (0x4 | (3 << 16))
#define HISI_PTT_TUNE_QOS_TX_NP (0x4 | (4 << 16))
#define HISI_PTT_TUNE_QOS_TX_P (0x4 | (5 << 16))
#define HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL (0x5 | (6 << 16))
#define HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL (0x5 | (7 << 16))

HISI_PTT_TUNE_ATTR_COMMON(qos_tx_cpl, HISI_PTT_TUNE_QOS_TX_CPL);
HISI_PTT_TUNE_ATTR_COMMON(qos_tx_np, HISI_PTT_TUNE_QOS_TX_NP);
HISI_PTT_TUNE_ATTR_COMMON(qos_tx_p, HISI_PTT_TUNE_QOS_TX_P);
HISI_PTT_TUNE_ATTR_COMMON(rx_alloc_buf_level, HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL);
HISI_PTT_TUNE_ATTR_COMMON(tx_alloc_buf_level, HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL);

/* All tuning attributes, exposed under the "tune" sysfs directory below */
static struct attribute *hisi_ptt_tune_attrs[] = {
	&hisi_ptt_qos_tx_cpl_attr.attr.attr,
	&hisi_ptt_qos_tx_np_attr.attr.attr,
	&hisi_ptt_qos_tx_p_attr.attr.attr,
	&hisi_ptt_rx_alloc_buf_level_attr.attr.attr,
	&hisi_ptt_tx_alloc_buf_level_attr.attr.attr,
	NULL,
};

static struct attribute_group hisi_ptt_tune_group = {
	.name = "tune",
	.attrs = hisi_ptt_tune_attrs,
};
155
156static u16 hisi_ptt_get_filter_val(u16 devid, bool is_port)
157{
158 if (is_port)
159 return BIT(HISI_PCIE_CORE_PORT_ID(devid & 0xff));
160
161 return devid;
162}
163
164static bool hisi_ptt_wait_trace_hw_idle(struct hisi_ptt *hisi_ptt)
165{
166 u32 val;
167
168 return !readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_STS,
169 val, val & HISI_PTT_TRACE_IDLE,
170 HISI_PTT_WAIT_POLL_INTERVAL_US,
171 HISI_PTT_WAIT_TRACE_TIMEOUT_US);
172}
173
174static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt)
175{
176 u32 val;
177
178 readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS,
179 val, !val, HISI_PTT_RESET_POLL_INTERVAL_US,
180 HISI_PTT_RESET_TIMEOUT_US);
181}
182
183static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt)
184{
185 writel(val: 0, addr: hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
186
187 /* Mask the interrupt on the end */
188 writel(HISI_PTT_TRACE_INT_MASK_ALL, addr: hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK);
189
190 hisi_ptt->trace_ctrl.started = false;
191}
192
193static int hisi_ptt_trace_start(struct hisi_ptt *hisi_ptt)
194{
195 struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
196 u32 val;
197 int i;
198
199 /* Check device idle before start trace */
200 if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt)) {
201 pci_err(hisi_ptt->pdev, "Failed to start trace, the device is still busy\n");
202 return -EBUSY;
203 }
204
205 ctrl->started = true;
206
207 /* Reset the DMA before start tracing */
208 val = readl(addr: hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
209 val |= HISI_PTT_TRACE_CTRL_RST;
210 writel(val, addr: hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
211
212 hisi_ptt_wait_dma_reset_done(hisi_ptt);
213
214 val = readl(addr: hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
215 val &= ~HISI_PTT_TRACE_CTRL_RST;
216 writel(val, addr: hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
217
218 /* Reset the index of current buffer */
219 hisi_ptt->trace_ctrl.buf_index = 0;
220
221 /* Zero the trace buffers */
222 for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++)
223 memset(ctrl->trace_buf[i].addr, 0, HISI_PTT_TRACE_BUF_SIZE);
224
225 /* Clear the interrupt status */
226 writel(HISI_PTT_TRACE_INT_STAT_MASK, addr: hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
227 writel(val: 0, addr: hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK);
228
229 /* Set the trace control register */
230 val = FIELD_PREP(HISI_PTT_TRACE_CTRL_TYPE_SEL, ctrl->type);
231 val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_RXTX_SEL, ctrl->direction);
232 val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_DATA_FORMAT, ctrl->format);
233 val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_TARGET_SEL, hisi_ptt->trace_ctrl.filter);
234 if (!hisi_ptt->trace_ctrl.is_port)
235 val |= HISI_PTT_TRACE_CTRL_FILTER_MODE;
236
237 /* Start the Trace */
238 val |= HISI_PTT_TRACE_CTRL_EN;
239 writel(val, addr: hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
240
241 return 0;
242}
243
244static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop)
245{
246 struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
247 struct perf_output_handle *handle = &ctrl->handle;
248 struct perf_event *event = handle->event;
249 struct hisi_ptt_pmu_buf *buf;
250 size_t size;
251 void *addr;
252
253 buf = perf_get_aux(handle);
254 if (!buf || !handle->size)
255 return -EINVAL;
256
257 addr = ctrl->trace_buf[ctrl->buf_index].addr;
258
259 /*
260 * If we're going to stop, read the size of already traced data from
261 * HISI_PTT_TRACE_WR_STS. Otherwise we're coming from the interrupt,
262 * the data size is always HISI_PTT_TRACE_BUF_SIZE.
263 */
264 if (stop) {
265 u32 reg;
266
267 reg = readl(addr: hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS);
268 size = FIELD_GET(HISI_PTT_TRACE_WR_STS_WRITE, reg);
269 } else {
270 size = HISI_PTT_TRACE_BUF_SIZE;
271 }
272
273 memcpy(buf->base + buf->pos, addr, size);
274 buf->pos += size;
275
276 /*
277 * Always commit the data to the AUX buffer in time to make sure
278 * userspace got enough time to consume the data.
279 *
280 * If we're not going to stop, apply a new one and check whether
281 * there's enough room for the next trace.
282 */
283 perf_aux_output_end(handle, size);
284 if (!stop) {
285 buf = perf_aux_output_begin(handle, event);
286 if (!buf)
287 return -EINVAL;
288
289 buf->pos = handle->head % buf->length;
290 if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
291 perf_aux_output_end(handle, size: 0);
292 return -EINVAL;
293 }
294 }
295
296 return 0;
297}
298
299static irqreturn_t hisi_ptt_isr(int irq, void *context)
300{
301 struct hisi_ptt *hisi_ptt = context;
302 u32 status, buf_idx;
303
304 status = readl(addr: hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
305 if (!(status & HISI_PTT_TRACE_INT_STAT_MASK))
306 return IRQ_NONE;
307
308 buf_idx = ffs(status) - 1;
309
310 /* Clear the interrupt status of buffer @buf_idx */
311 writel(val: status, addr: hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
312
313 /*
314 * Update the AUX buffer and cache the current buffer index,
315 * as we need to know this and save the data when the trace
316 * is ended out of the interrupt handler. End the trace
317 * if the updating fails.
318 */
319 if (hisi_ptt_update_aux(hisi_ptt, index: buf_idx, stop: false))
320 hisi_ptt_trace_end(hisi_ptt);
321 else
322 hisi_ptt->trace_ctrl.buf_index = (buf_idx + 1) % HISI_PTT_TRACE_BUF_CNT;
323
324 return IRQ_HANDLED;
325}
326
/* devm action callback: release the MSI vectors allocated at probe */
static void hisi_ptt_irq_free_vectors(void *pdev)
{
	pci_free_irq_vectors(pdev);
}
331
332static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
333{
334 struct pci_dev *pdev = hisi_ptt->pdev;
335 int ret;
336
337 ret = pci_alloc_irq_vectors(dev: pdev, min_vecs: 1, max_vecs: 1, PCI_IRQ_MSI);
338 if (ret < 0) {
339 pci_err(pdev, "failed to allocate irq vector, ret = %d\n", ret);
340 return ret;
341 }
342
343 ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_irq_free_vectors, pdev);
344 if (ret < 0)
345 return ret;
346
347 hisi_ptt->trace_irq = pci_irq_vector(dev: pdev, HISI_PTT_TRACE_DMA_IRQ);
348 ret = devm_request_irq(dev: &pdev->dev, irq: hisi_ptt->trace_irq, handler: hisi_ptt_isr,
349 IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
350 dev_id: hisi_ptt);
351 if (ret) {
352 pci_err(pdev, "failed to request irq %d, ret = %d\n",
353 hisi_ptt->trace_irq, ret);
354 return ret;
355 }
356
357 return 0;
358}
359
360static void hisi_ptt_del_free_filter(struct hisi_ptt *hisi_ptt,
361 struct hisi_ptt_filter_desc *filter)
362{
363 if (filter->is_port)
364 hisi_ptt->port_mask &= ~hisi_ptt_get_filter_val(devid: filter->devid, is_port: true);
365
366 list_del(entry: &filter->list);
367 kfree(objp: filter->name);
368 kfree(objp: filter);
369}
370
371static struct hisi_ptt_filter_desc *
372hisi_ptt_alloc_add_filter(struct hisi_ptt *hisi_ptt, u16 devid, bool is_port)
373{
374 struct hisi_ptt_filter_desc *filter;
375 u8 devfn = devid & 0xff;
376 char *filter_name;
377
378 filter_name = kasprintf(GFP_KERNEL, fmt: "%04x:%02x:%02x.%d", pci_domain_nr(bus: hisi_ptt->pdev->bus),
379 PCI_BUS_NUM(devid), PCI_SLOT(devfn), PCI_FUNC(devfn));
380 if (!filter_name) {
381 pci_err(hisi_ptt->pdev, "failed to allocate name for filter %04x:%02x:%02x.%d\n",
382 pci_domain_nr(hisi_ptt->pdev->bus), PCI_BUS_NUM(devid),
383 PCI_SLOT(devfn), PCI_FUNC(devfn));
384 return NULL;
385 }
386
387 filter = kzalloc(size: sizeof(*filter), GFP_KERNEL);
388 if (!filter) {
389 pci_err(hisi_ptt->pdev, "failed to add filter for %s\n",
390 filter_name);
391 kfree(objp: filter_name);
392 return NULL;
393 }
394
395 filter->name = filter_name;
396 filter->is_port = is_port;
397 filter->devid = devid;
398
399 if (filter->is_port) {
400 list_add_tail(new: &filter->list, head: &hisi_ptt->port_filters);
401
402 /* Update the available port mask */
403 hisi_ptt->port_mask |= hisi_ptt_get_filter_val(devid: filter->devid, is_port: true);
404 } else {
405 list_add_tail(new: &filter->list, head: &hisi_ptt->req_filters);
406 }
407
408 return filter;
409}
410
411static ssize_t hisi_ptt_filter_show(struct device *dev, struct device_attribute *attr,
412 char *buf)
413{
414 struct hisi_ptt_filter_desc *filter;
415 unsigned long filter_val;
416
417 filter = container_of(attr, struct hisi_ptt_filter_desc, attr);
418 filter_val = hisi_ptt_get_filter_val(devid: filter->devid, is_port: filter->is_port) |
419 (filter->is_port ? HISI_PTT_PMU_FILTER_IS_PORT : 0);
420
421 return sysfs_emit(buf, fmt: "0x%05lx\n", filter_val);
422}
423
424static int hisi_ptt_create_rp_filter_attr(struct hisi_ptt *hisi_ptt,
425 struct hisi_ptt_filter_desc *filter)
426{
427 struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
428
429 sysfs_attr_init(&filter->attr.attr);
430 filter->attr.attr.name = filter->name;
431 filter->attr.attr.mode = 0400; /* DEVICE_ATTR_ADMIN_RO */
432 filter->attr.show = hisi_ptt_filter_show;
433
434 return sysfs_add_file_to_group(kobj, attr: &filter->attr.attr,
435 HISI_PTT_RP_FILTERS_GRP_NAME);
436}
437
438static void hisi_ptt_remove_rp_filter_attr(struct hisi_ptt *hisi_ptt,
439 struct hisi_ptt_filter_desc *filter)
440{
441 struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
442
443 sysfs_remove_file_from_group(kobj, attr: &filter->attr.attr,
444 HISI_PTT_RP_FILTERS_GRP_NAME);
445}
446
447static int hisi_ptt_create_req_filter_attr(struct hisi_ptt *hisi_ptt,
448 struct hisi_ptt_filter_desc *filter)
449{
450 struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
451
452 sysfs_attr_init(&filter->attr.attr);
453 filter->attr.attr.name = filter->name;
454 filter->attr.attr.mode = 0400; /* DEVICE_ATTR_ADMIN_RO */
455 filter->attr.show = hisi_ptt_filter_show;
456
457 return sysfs_add_file_to_group(kobj, attr: &filter->attr.attr,
458 HISI_PTT_REQ_FILTERS_GRP_NAME);
459}
460
461static void hisi_ptt_remove_req_filter_attr(struct hisi_ptt *hisi_ptt,
462 struct hisi_ptt_filter_desc *filter)
463{
464 struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
465
466 sysfs_remove_file_from_group(kobj, attr: &filter->attr.attr,
467 HISI_PTT_REQ_FILTERS_GRP_NAME);
468}
469
470static int hisi_ptt_create_filter_attr(struct hisi_ptt *hisi_ptt,
471 struct hisi_ptt_filter_desc *filter)
472{
473 int ret;
474
475 if (filter->is_port)
476 ret = hisi_ptt_create_rp_filter_attr(hisi_ptt, filter);
477 else
478 ret = hisi_ptt_create_req_filter_attr(hisi_ptt, filter);
479
480 if (ret)
481 pci_err(hisi_ptt->pdev, "failed to create sysfs attribute for filter %s\n",
482 filter->name);
483
484 return ret;
485}
486
487static void hisi_ptt_remove_filter_attr(struct hisi_ptt *hisi_ptt,
488 struct hisi_ptt_filter_desc *filter)
489{
490 if (filter->is_port)
491 hisi_ptt_remove_rp_filter_attr(hisi_ptt, filter);
492 else
493 hisi_ptt_remove_req_filter_attr(hisi_ptt, filter);
494}
495
496static void hisi_ptt_remove_all_filter_attributes(void *data)
497{
498 struct hisi_ptt_filter_desc *filter;
499 struct hisi_ptt *hisi_ptt = data;
500
501 mutex_lock(&hisi_ptt->filter_lock);
502
503 list_for_each_entry(filter, &hisi_ptt->req_filters, list)
504 hisi_ptt_remove_filter_attr(hisi_ptt, filter);
505
506 list_for_each_entry(filter, &hisi_ptt->port_filters, list)
507 hisi_ptt_remove_filter_attr(hisi_ptt, filter);
508
509 hisi_ptt->sysfs_inited = false;
510 mutex_unlock(lock: &hisi_ptt->filter_lock);
511}
512
513static int hisi_ptt_init_filter_attributes(struct hisi_ptt *hisi_ptt)
514{
515 struct hisi_ptt_filter_desc *filter;
516 int ret;
517
518 mutex_lock(&hisi_ptt->filter_lock);
519
520 /*
521 * Register the reset callback in the first stage. In reset we traverse
522 * the filters list to remove the sysfs attributes so the callback can
523 * be called safely even without below filter attributes creation.
524 */
525 ret = devm_add_action(&hisi_ptt->pdev->dev,
526 hisi_ptt_remove_all_filter_attributes,
527 hisi_ptt);
528 if (ret)
529 goto out;
530
531 list_for_each_entry(filter, &hisi_ptt->port_filters, list) {
532 ret = hisi_ptt_create_filter_attr(hisi_ptt, filter);
533 if (ret)
534 goto out;
535 }
536
537 list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
538 ret = hisi_ptt_create_filter_attr(hisi_ptt, filter);
539 if (ret)
540 goto out;
541 }
542
543 hisi_ptt->sysfs_inited = true;
544out:
545 mutex_unlock(lock: &hisi_ptt->filter_lock);
546 return ret;
547}
548
549static void hisi_ptt_update_filters(struct work_struct *work)
550{
551 struct delayed_work *delayed_work = to_delayed_work(work);
552 struct hisi_ptt_filter_update_info info;
553 struct hisi_ptt_filter_desc *filter;
554 struct hisi_ptt *hisi_ptt;
555
556 hisi_ptt = container_of(delayed_work, struct hisi_ptt, work);
557
558 if (!mutex_trylock(lock: &hisi_ptt->filter_lock)) {
559 schedule_delayed_work(dwork: &hisi_ptt->work, HISI_PTT_WORK_DELAY_MS);
560 return;
561 }
562
563 while (kfifo_get(&hisi_ptt->filter_update_kfifo, &info)) {
564 if (info.is_add) {
565 /*
566 * Notify the users if failed to add this filter, others
567 * still work and available. See the comments in
568 * hisi_ptt_init_filters().
569 */
570 filter = hisi_ptt_alloc_add_filter(hisi_ptt, devid: info.devid, is_port: info.is_port);
571 if (!filter)
572 continue;
573
574 /*
575 * If filters' sysfs entries hasn't been initialized,
576 * then we're still at probe stage. Add the filters to
577 * the list and later hisi_ptt_init_filter_attributes()
578 * will create sysfs attributes for all the filters.
579 */
580 if (hisi_ptt->sysfs_inited &&
581 hisi_ptt_create_filter_attr(hisi_ptt, filter)) {
582 hisi_ptt_del_free_filter(hisi_ptt, filter);
583 continue;
584 }
585 } else {
586 struct hisi_ptt_filter_desc *tmp;
587 struct list_head *target_list;
588
589 target_list = info.is_port ? &hisi_ptt->port_filters :
590 &hisi_ptt->req_filters;
591
592 list_for_each_entry_safe(filter, tmp, target_list, list)
593 if (filter->devid == info.devid) {
594 if (hisi_ptt->sysfs_inited)
595 hisi_ptt_remove_filter_attr(hisi_ptt, filter);
596
597 hisi_ptt_del_free_filter(hisi_ptt, filter);
598 break;
599 }
600 }
601 }
602
603 mutex_unlock(lock: &hisi_ptt->filter_lock);
604}
605
606/*
607 * A PCI bus notifier is used here for dynamically updating the filter
608 * list.
609 */
610static int hisi_ptt_notifier_call(struct notifier_block *nb, unsigned long action,
611 void *data)
612{
613 struct hisi_ptt *hisi_ptt = container_of(nb, struct hisi_ptt, hisi_ptt_nb);
614 struct hisi_ptt_filter_update_info info;
615 struct pci_dev *pdev, *root_port;
616 struct device *dev = data;
617 u32 port_devid;
618
619 pdev = to_pci_dev(dev);
620 root_port = pcie_find_root_port(dev: pdev);
621 if (!root_port)
622 return 0;
623
624 port_devid = pci_dev_id(dev: root_port);
625 if (port_devid < hisi_ptt->lower_bdf ||
626 port_devid > hisi_ptt->upper_bdf)
627 return 0;
628
629 info.is_port = pci_pcie_type(dev: pdev) == PCI_EXP_TYPE_ROOT_PORT;
630 info.devid = pci_dev_id(dev: pdev);
631
632 switch (action) {
633 case BUS_NOTIFY_ADD_DEVICE:
634 info.is_add = true;
635 break;
636 case BUS_NOTIFY_DEL_DEVICE:
637 info.is_add = false;
638 break;
639 default:
640 return 0;
641 }
642
643 /*
644 * The FIFO size is 16 which is sufficient for almost all the cases,
645 * since each PCIe core will have most 8 Root Ports (typically only
646 * 1~4 Root Ports). On failure log the failed filter and let user
647 * handle it.
648 */
649 if (kfifo_in_spinlocked(&hisi_ptt->filter_update_kfifo, &info, 1,
650 &hisi_ptt->filter_update_lock))
651 schedule_delayed_work(dwork: &hisi_ptt->work, delay: 0);
652 else
653 pci_warn(hisi_ptt->pdev,
654 "filter update fifo overflow for target %s\n",
655 pci_name(pdev));
656
657 return 0;
658}
659
660static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
661{
662 struct pci_dev *root_port = pcie_find_root_port(dev: pdev);
663 struct hisi_ptt_filter_desc *filter;
664 struct hisi_ptt *hisi_ptt = data;
665 u32 port_devid;
666
667 if (!root_port)
668 return 0;
669
670 port_devid = pci_dev_id(dev: root_port);
671 if (port_devid < hisi_ptt->lower_bdf ||
672 port_devid > hisi_ptt->upper_bdf)
673 return 0;
674
675 /*
676 * We won't fail the probe if filter allocation failed here. The filters
677 * should be partial initialized and users would know which filter fails
678 * through the log. Other functions of PTT device are still available.
679 */
680 filter = hisi_ptt_alloc_add_filter(hisi_ptt, devid: pci_dev_id(dev: pdev),
681 is_port: pci_pcie_type(dev: pdev) == PCI_EXP_TYPE_ROOT_PORT);
682 if (!filter)
683 return -ENOMEM;
684
685 return 0;
686}
687
/*
 * devm action callback: free every remaining filter on both lists.
 * Runs at device teardown, after the sysfs attributes have already been
 * removed by hisi_ptt_remove_all_filter_attributes(), so no locking or
 * sysfs cleanup is done here.
 */
static void hisi_ptt_release_filters(void *data)
{
	struct hisi_ptt_filter_desc *filter, *tmp;
	struct hisi_ptt *hisi_ptt = data;

	/* Requester filters first, then Root Port filters */
	list_for_each_entry_safe(filter, tmp, &hisi_ptt->req_filters, list)
		hisi_ptt_del_free_filter(hisi_ptt, filter);

	list_for_each_entry_safe(filter, tmp, &hisi_ptt->port_filters, list)
		hisi_ptt_del_free_filter(hisi_ptt, filter);
}
699
700static int hisi_ptt_config_trace_buf(struct hisi_ptt *hisi_ptt)
701{
702 struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
703 struct device *dev = &hisi_ptt->pdev->dev;
704 int i;
705
706 ctrl->trace_buf = devm_kcalloc(dev, HISI_PTT_TRACE_BUF_CNT,
707 size: sizeof(*ctrl->trace_buf), GFP_KERNEL);
708 if (!ctrl->trace_buf)
709 return -ENOMEM;
710
711 for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; ++i) {
712 ctrl->trace_buf[i].addr = dmam_alloc_coherent(dev, HISI_PTT_TRACE_BUF_SIZE,
713 dma_handle: &ctrl->trace_buf[i].dma,
714 GFP_KERNEL);
715 if (!ctrl->trace_buf[i].addr)
716 return -ENOMEM;
717 }
718
719 /* Configure the trace DMA buffer */
720 for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++) {
721 writel(lower_32_bits(ctrl->trace_buf[i].dma),
722 addr: hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_LO_0 +
723 i * HISI_PTT_TRACE_ADDR_STRIDE);
724 writel(upper_32_bits(ctrl->trace_buf[i].dma),
725 addr: hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_HI_0 +
726 i * HISI_PTT_TRACE_ADDR_STRIDE);
727 }
728 writel(HISI_PTT_TRACE_BUF_SIZE, addr: hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_SIZE);
729
730 return 0;
731}
732
733static int hisi_ptt_init_ctrls(struct hisi_ptt *hisi_ptt)
734{
735 struct pci_dev *pdev = hisi_ptt->pdev;
736 struct pci_bus *bus;
737 int ret;
738 u32 reg;
739
740 INIT_DELAYED_WORK(&hisi_ptt->work, hisi_ptt_update_filters);
741 INIT_KFIFO(hisi_ptt->filter_update_kfifo);
742 spin_lock_init(&hisi_ptt->filter_update_lock);
743
744 INIT_LIST_HEAD(list: &hisi_ptt->port_filters);
745 INIT_LIST_HEAD(list: &hisi_ptt->req_filters);
746 mutex_init(&hisi_ptt->filter_lock);
747
748 ret = hisi_ptt_config_trace_buf(hisi_ptt);
749 if (ret)
750 return ret;
751
752 /*
753 * The device range register provides the information about the root
754 * ports which the RCiEP can control and trace. The RCiEP and the root
755 * ports which it supports are on the same PCIe core, with same domain
756 * number but maybe different bus number. The device range register
757 * will tell us which root ports we can support, Bit[31:16] indicates
758 * the upper BDF numbers of the root port, while Bit[15:0] indicates
759 * the lower.
760 */
761 reg = readl(addr: hisi_ptt->iobase + HISI_PTT_DEVICE_RANGE);
762 hisi_ptt->upper_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_UPPER, reg);
763 hisi_ptt->lower_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_LOWER, reg);
764
765 bus = pci_find_bus(domain: pci_domain_nr(bus: pdev->bus), PCI_BUS_NUM(hisi_ptt->upper_bdf));
766 if (bus)
767 pci_walk_bus(top: bus, cb: hisi_ptt_init_filters, userdata: hisi_ptt);
768
769 ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_release_filters, hisi_ptt);
770 if (ret)
771 return ret;
772
773 hisi_ptt->trace_ctrl.on_cpu = -1;
774 return 0;
775}
776
777static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
778 char *buf)
779{
780 struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
781 const cpumask_t *cpumask = cpumask_of_node(node: dev_to_node(dev: &hisi_ptt->pdev->dev));
782
783 return cpumap_print_to_pagebuf(list: true, buf, mask: cpumask);
784}
785static DEVICE_ATTR_RO(cpumask);
786
/* The "cpumask" attribute exposed on the PMU device */
static struct attribute *hisi_ptt_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group hisi_ptt_cpumask_attr_group = {
	.attrs = hisi_ptt_cpumask_attrs,
};

/*
 * Bit 19 indicates the filter type, 1 for Root Port filter and 0 for Requester
 * filter. Bit[15:0] indicates the filter value, for Root Port filter it's
 * a bit mask of desired ports and for Requester filter it's the Requester ID
 * of the desired PCIe function. Bit[18:16] is reserved for extension.
 *
 * See hisi_ptt.rst documentation for detailed information.
 */
PMU_FORMAT_ATTR(filter, "config:0-19");
PMU_FORMAT_ATTR(direction, "config:20-23");
PMU_FORMAT_ATTR(type, "config:24-31");
PMU_FORMAT_ATTR(format, "config:32-35");

/* perf event config field layout, exported under "format" in sysfs */
static struct attribute *hisi_ptt_pmu_format_attrs[] = {
	&format_attr_filter.attr,
	&format_attr_direction.attr,
	&format_attr_type.attr,
	&format_attr_format.attr,
	NULL
};

static struct attribute_group hisi_ptt_pmu_format_group = {
	.name = "format",
	.attrs = hisi_ptt_pmu_format_attrs,
};
821
822static ssize_t hisi_ptt_filter_multiselect_show(struct device *dev,
823 struct device_attribute *attr,
824 char *buf)
825{
826 struct dev_ext_attribute *ext_attr;
827
828 ext_attr = container_of(attr, struct dev_ext_attribute, attr);
829 return sysfs_emit(buf, fmt: "%s\n", (char *)ext_attr->var);
830}
831
/* Root Port filters may be combined, hence multiselect = "1" */
static struct dev_ext_attribute root_port_filters_multiselect = {
	.attr = {
		.attr = { .name = "multiselect", .mode = 0400 },
		.show = hisi_ptt_filter_multiselect_show,
	},
	.var = "1",
};

static struct attribute *hisi_ptt_pmu_root_ports_attrs[] = {
	&root_port_filters_multiselect.attr.attr,
	NULL
};

static struct attribute_group hisi_ptt_pmu_root_ports_group = {
	.name = HISI_PTT_RP_FILTERS_GRP_NAME,
	.attrs = hisi_ptt_pmu_root_ports_attrs,
};

/* Only one Requester filter may be selected at a time, hence "0" */
static struct dev_ext_attribute requester_filters_multiselect = {
	.attr = {
		.attr = { .name = "multiselect", .mode = 0400 },
		.show = hisi_ptt_filter_multiselect_show,
	},
	.var = "0",
};

static struct attribute *hisi_ptt_pmu_requesters_attrs[] = {
	&requester_filters_multiselect.attr.attr,
	NULL
};

static struct attribute_group hisi_ptt_pmu_requesters_group = {
	.name = HISI_PTT_REQ_FILTERS_GRP_NAME,
	.attrs = hisi_ptt_pmu_requesters_attrs,
};

/* All sysfs groups registered with the PMU device */
static const struct attribute_group *hisi_ptt_pmu_groups[] = {
	&hisi_ptt_cpumask_attr_group,
	&hisi_ptt_pmu_format_group,
	&hisi_ptt_tune_group,
	&hisi_ptt_pmu_root_ports_group,
	&hisi_ptt_pmu_requesters_group,
	NULL
};
876
/*
 * Validate the trace direction selector from the perf config.
 * Returns 0 for a supported value, -EINVAL otherwise.
 */
static int hisi_ptt_trace_valid_direction(u32 val)
{
	/*
	 * The direction values have different effects according to the data
	 * format (specified in the parentheses). TLP set A/B means different
	 * set of TLP types. See hisi_ptt.rst documentation for more details.
	 */
	static const u32 valid_directions[] = {
		0, /* inbound(4DW) or reserved(8DW) */
		1, /* outbound(4DW) */
		2, /* {in, out}bound(4DW) or inbound(8DW), TLP set A */
		3, /* {in, out}bound(4DW) or inbound(8DW), TLP set B */
	};
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(valid_directions); idx++)
		if (val == valid_directions[idx])
			return 0;

	return -EINVAL;
}
899
/*
 * Validate the TLP type selection from the perf config. Types may be
 * combined, but at least one must be set and no unknown bit may remain.
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int hisi_ptt_trace_valid_type(u32 val)
{
	/* Posted request, non-posted request and completion, respectively */
	static const u32 available_types[] = { 1, 2, 4 };
	u32 residue = val;
	size_t idx;

	/* At least one type must be selected */
	if (!val)
		return -EINVAL;

	/*
	 * Strip every supported bit from the config; any leftover bit
	 * means an unsupported type was requested.
	 */
	for (idx = 0; idx < ARRAY_SIZE(available_types); idx++)
		residue &= ~available_types[idx];

	return residue ? -EINVAL : 0;
}
926
/*
 * Validate the trace data format from the perf config.
 * Returns 0 for a supported value, -EINVAL otherwise.
 */
static int hisi_ptt_trace_valid_format(u32 val)
{
	static const u32 available_formats[] = {
		0, /* 4DW */
		1, /* 8DW */
	};
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(available_formats); idx++)
		if (val == available_formats[idx])
			return 0;

	return -EINVAL;
}
942
943static int hisi_ptt_trace_valid_filter(struct hisi_ptt *hisi_ptt, u64 config)
944{
945 unsigned long val, port_mask = hisi_ptt->port_mask;
946 struct hisi_ptt_filter_desc *filter;
947 int ret = 0;
948
949 hisi_ptt->trace_ctrl.is_port = FIELD_GET(HISI_PTT_PMU_FILTER_IS_PORT, config);
950 val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, config);
951
952 /*
953 * Port filters are defined as bit mask. For port filters, check
954 * the bits in the @val are within the range of hisi_ptt->port_mask
955 * and whether it's empty or not, otherwise user has specified
956 * some unsupported root ports.
957 *
958 * For Requester ID filters, walk the available filter list to see
959 * whether we have one matched.
960 */
961 mutex_lock(&hisi_ptt->filter_lock);
962 if (!hisi_ptt->trace_ctrl.is_port) {
963 list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
964 if (val == hisi_ptt_get_filter_val(devid: filter->devid, is_port: filter->is_port))
965 goto out;
966 }
967 } else if (bitmap_subset(src1: &val, src2: &port_mask, BITS_PER_LONG)) {
968 goto out;
969 }
970
971 ret = -EINVAL;
972out:
973 mutex_unlock(lock: &hisi_ptt->filter_lock);
974 return ret;
975}
976
977static void hisi_ptt_pmu_init_configs(struct hisi_ptt *hisi_ptt, struct perf_event *event)
978{
979 struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
980 u32 val;
981
982 val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, event->attr.config);
983 hisi_ptt->trace_ctrl.filter = val;
984
985 val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
986 ctrl->direction = val;
987
988 val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
989 ctrl->type = val;
990
991 val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
992 ctrl->format = val;
993}
994
995static int hisi_ptt_pmu_event_init(struct perf_event *event)
996{
997 struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
998 int ret;
999 u32 val;
1000
1001 if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
1002 return -ENOENT;
1003
1004 if (event->cpu < 0) {
1005 dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
1006 return -EOPNOTSUPP;
1007 }
1008
1009 if (event->attach_state & PERF_ATTACH_TASK)
1010 return -EOPNOTSUPP;
1011
1012 ret = hisi_ptt_trace_valid_filter(hisi_ptt, config: event->attr.config);
1013 if (ret < 0)
1014 return ret;
1015
1016 val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
1017 ret = hisi_ptt_trace_valid_direction(val);
1018 if (ret < 0)
1019 return ret;
1020
1021 val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
1022 ret = hisi_ptt_trace_valid_type(val);
1023 if (ret < 0)
1024 return ret;
1025
1026 val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
1027 return hisi_ptt_trace_valid_format(val);
1028}
1029
1030static void *hisi_ptt_pmu_setup_aux(struct perf_event *event, void **pages,
1031 int nr_pages, bool overwrite)
1032{
1033 struct hisi_ptt_pmu_buf *buf;
1034 struct page **pagelist;
1035 int i;
1036
1037 if (overwrite) {
1038 dev_warn(event->pmu->dev, "Overwrite mode is not supported\n");
1039 return NULL;
1040 }
1041
1042 /* If the pages size less than buffers, we cannot start trace */
1043 if (nr_pages < HISI_PTT_TRACE_TOTAL_BUF_SIZE / PAGE_SIZE)
1044 return NULL;
1045
1046 buf = kzalloc(size: sizeof(*buf), GFP_KERNEL);
1047 if (!buf)
1048 return NULL;
1049
1050 pagelist = kcalloc(n: nr_pages, size: sizeof(*pagelist), GFP_KERNEL);
1051 if (!pagelist)
1052 goto err;
1053
1054 for (i = 0; i < nr_pages; i++)
1055 pagelist[i] = virt_to_page(pages[i]);
1056
1057 buf->base = vmap(pages: pagelist, count: nr_pages, VM_MAP, PAGE_KERNEL);
1058 if (!buf->base) {
1059 kfree(objp: pagelist);
1060 goto err;
1061 }
1062
1063 buf->nr_pages = nr_pages;
1064 buf->length = nr_pages * PAGE_SIZE;
1065 buf->pos = 0;
1066
1067 kfree(objp: pagelist);
1068 return buf;
1069err:
1070 kfree(objp: buf);
1071 return NULL;
1072}
1073
1074static void hisi_ptt_pmu_free_aux(void *aux)
1075{
1076 struct hisi_ptt_pmu_buf *buf = aux;
1077
1078 vunmap(addr: buf->base);
1079 kfree(objp: buf);
1080}
1081
1082static void hisi_ptt_pmu_start(struct perf_event *event, int flags)
1083{
1084 struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
1085 struct perf_output_handle *handle = &hisi_ptt->trace_ctrl.handle;
1086 struct hw_perf_event *hwc = &event->hw;
1087 struct device *dev = event->pmu->dev;
1088 struct hisi_ptt_pmu_buf *buf;
1089 int cpu = event->cpu;
1090 int ret;
1091
1092 hwc->state = 0;
1093
1094 /* Serialize the perf process if user specified several CPUs */
1095 spin_lock(lock: &hisi_ptt->pmu_lock);
1096 if (hisi_ptt->trace_ctrl.started) {
1097 dev_dbg(dev, "trace has already started\n");
1098 goto stop;
1099 }
1100
1101 /*
1102 * Handle the interrupt on the same cpu which starts the trace to avoid
1103 * context mismatch. Otherwise we'll trigger the WARN from the perf
1104 * core in event_function_local(). If CPU passed is offline we'll fail
1105 * here, just log it since we can do nothing here.
1106 */
1107 ret = irq_set_affinity(irq: hisi_ptt->trace_irq, cpumask_of(cpu));
1108 if (ret)
1109 dev_warn(dev, "failed to set the affinity of trace interrupt\n");
1110
1111 hisi_ptt->trace_ctrl.on_cpu = cpu;
1112
1113 buf = perf_aux_output_begin(handle, event);
1114 if (!buf) {
1115 dev_dbg(dev, "aux output begin failed\n");
1116 goto stop;
1117 }
1118
1119 buf->pos = handle->head % buf->length;
1120
1121 hisi_ptt_pmu_init_configs(hisi_ptt, event);
1122
1123 ret = hisi_ptt_trace_start(hisi_ptt);
1124 if (ret) {
1125 dev_dbg(dev, "trace start failed, ret = %d\n", ret);
1126 perf_aux_output_end(handle, size: 0);
1127 goto stop;
1128 }
1129
1130 spin_unlock(lock: &hisi_ptt->pmu_lock);
1131 return;
1132stop:
1133 event->hw.state |= PERF_HES_STOPPED;
1134 spin_unlock(lock: &hisi_ptt->pmu_lock);
1135}
1136
1137static void hisi_ptt_pmu_stop(struct perf_event *event, int flags)
1138{
1139 struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
1140 struct hw_perf_event *hwc = &event->hw;
1141
1142 if (hwc->state & PERF_HES_STOPPED)
1143 return;
1144
1145 spin_lock(lock: &hisi_ptt->pmu_lock);
1146 if (hisi_ptt->trace_ctrl.started) {
1147 hisi_ptt_trace_end(hisi_ptt);
1148
1149 if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt))
1150 dev_warn(event->pmu->dev, "Device is still busy\n");
1151
1152 hisi_ptt_update_aux(hisi_ptt, index: hisi_ptt->trace_ctrl.buf_index, stop: true);
1153 }
1154 spin_unlock(lock: &hisi_ptt->pmu_lock);
1155
1156 hwc->state |= PERF_HES_STOPPED;
1157 perf_event_update_userpage(event);
1158 hwc->state |= PERF_HES_UPTODATE;
1159}
1160
1161static int hisi_ptt_pmu_add(struct perf_event *event, int flags)
1162{
1163 struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
1164 struct hw_perf_event *hwc = &event->hw;
1165 int cpu = event->cpu;
1166
1167 /* Only allow the cpus on the device's node to add the event */
1168 if (!cpumask_test_cpu(cpu, cpumask: cpumask_of_node(node: dev_to_node(dev: &hisi_ptt->pdev->dev))))
1169 return 0;
1170
1171 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
1172
1173 if (flags & PERF_EF_START) {
1174 hisi_ptt_pmu_start(event, PERF_EF_RELOAD);
1175 if (hwc->state & PERF_HES_STOPPED)
1176 return -EINVAL;
1177 }
1178
1179 return 0;
1180}
1181
/*
 * PMU del() callback. There is no counter state to snapshot for an
 * AUX-only trace PMU, so removal just stops an in-flight trace.
 */
static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
{
	hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
}
1186
/* PMU read() callback: intentionally empty, no counters to read */
static void hisi_ptt_pmu_read(struct perf_event *event)
{
}
1190
1191static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
1192{
1193 cpuhp_state_remove_instance_nocalls(state: hisi_ptt_pmu_online, node: hotplug_node);
1194}
1195
/* devm action: undo the perf_pmu_register() done at probe time */
static void hisi_ptt_unregister_pmu(void *pmu)
{
	perf_pmu_unregister(pmu);
}
1200
1201static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
1202{
1203 u16 core_id, sicl_id;
1204 char *pmu_name;
1205 u32 reg;
1206 int ret;
1207
1208 ret = cpuhp_state_add_instance_nocalls(state: hisi_ptt_pmu_online,
1209 node: &hisi_ptt->hotplug_node);
1210 if (ret)
1211 return ret;
1212
1213 ret = devm_add_action_or_reset(&hisi_ptt->pdev->dev,
1214 hisi_ptt_remove_cpuhp_instance,
1215 &hisi_ptt->hotplug_node);
1216 if (ret)
1217 return ret;
1218
1219 mutex_init(&hisi_ptt->tune_lock);
1220 spin_lock_init(&hisi_ptt->pmu_lock);
1221
1222 hisi_ptt->hisi_ptt_pmu = (struct pmu) {
1223 .module = THIS_MODULE,
1224 .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_NO_EXCLUDE,
1225 .task_ctx_nr = perf_sw_context,
1226 .attr_groups = hisi_ptt_pmu_groups,
1227 .event_init = hisi_ptt_pmu_event_init,
1228 .setup_aux = hisi_ptt_pmu_setup_aux,
1229 .free_aux = hisi_ptt_pmu_free_aux,
1230 .start = hisi_ptt_pmu_start,
1231 .stop = hisi_ptt_pmu_stop,
1232 .add = hisi_ptt_pmu_add,
1233 .del = hisi_ptt_pmu_del,
1234 .read = hisi_ptt_pmu_read,
1235 };
1236
1237 reg = readl(addr: hisi_ptt->iobase + HISI_PTT_LOCATION);
1238 core_id = FIELD_GET(HISI_PTT_CORE_ID, reg);
1239 sicl_id = FIELD_GET(HISI_PTT_SICL_ID, reg);
1240
1241 pmu_name = devm_kasprintf(dev: &hisi_ptt->pdev->dev, GFP_KERNEL, fmt: "hisi_ptt%u_%u",
1242 sicl_id, core_id);
1243 if (!pmu_name)
1244 return -ENOMEM;
1245
1246 ret = perf_pmu_register(pmu: &hisi_ptt->hisi_ptt_pmu, name: pmu_name, type: -1);
1247 if (ret)
1248 return ret;
1249
1250 return devm_add_action_or_reset(&hisi_ptt->pdev->dev,
1251 hisi_ptt_unregister_pmu,
1252 &hisi_ptt->hisi_ptt_pmu);
1253}
1254
1255static void hisi_ptt_unregister_filter_update_notifier(void *data)
1256{
1257 struct hisi_ptt *hisi_ptt = data;
1258
1259 bus_unregister_notifier(bus: &pci_bus_type, nb: &hisi_ptt->hisi_ptt_nb);
1260
1261 /* Cancel any work that has been queued */
1262 cancel_delayed_work_sync(dwork: &hisi_ptt->work);
1263}
1264
1265/* Register the bus notifier for dynamically updating the filter list */
1266static int hisi_ptt_register_filter_update_notifier(struct hisi_ptt *hisi_ptt)
1267{
1268 int ret;
1269
1270 hisi_ptt->hisi_ptt_nb.notifier_call = hisi_ptt_notifier_call;
1271 ret = bus_register_notifier(bus: &pci_bus_type, nb: &hisi_ptt->hisi_ptt_nb);
1272 if (ret)
1273 return ret;
1274
1275 return devm_add_action_or_reset(&hisi_ptt->pdev->dev,
1276 hisi_ptt_unregister_filter_update_notifier,
1277 hisi_ptt);
1278}
1279
1280/*
1281 * The DMA of PTT trace can only use direct mappings due to some
1282 * hardware restriction. Check whether there is no IOMMU or the
1283 * policy of the IOMMU domain is passthrough, otherwise the trace
1284 * cannot work.
1285 *
1286 * The PTT device is supposed to behind an ARM SMMUv3, which
1287 * should have passthrough the device by a quirk.
1288 */
1289static int hisi_ptt_check_iommu_mapping(struct pci_dev *pdev)
1290{
1291 struct iommu_domain *iommu_domain;
1292
1293 iommu_domain = iommu_get_domain_for_dev(dev: &pdev->dev);
1294 if (!iommu_domain || iommu_domain->type == IOMMU_DOMAIN_IDENTITY)
1295 return 0;
1296
1297 return -EOPNOTSUPP;
1298}
1299
1300static int hisi_ptt_probe(struct pci_dev *pdev,
1301 const struct pci_device_id *id)
1302{
1303 struct hisi_ptt *hisi_ptt;
1304 int ret;
1305
1306 ret = hisi_ptt_check_iommu_mapping(pdev);
1307 if (ret) {
1308 pci_err(pdev, "requires direct DMA mappings\n");
1309 return ret;
1310 }
1311
1312 hisi_ptt = devm_kzalloc(dev: &pdev->dev, size: sizeof(*hisi_ptt), GFP_KERNEL);
1313 if (!hisi_ptt)
1314 return -ENOMEM;
1315
1316 hisi_ptt->pdev = pdev;
1317 pci_set_drvdata(pdev, data: hisi_ptt);
1318
1319 ret = pcim_enable_device(pdev);
1320 if (ret) {
1321 pci_err(pdev, "failed to enable device, ret = %d\n", ret);
1322 return ret;
1323 }
1324
1325 ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
1326 if (ret) {
1327 pci_err(pdev, "failed to remap io memory, ret = %d\n", ret);
1328 return ret;
1329 }
1330
1331 hisi_ptt->iobase = pcim_iomap_table(pdev)[2];
1332
1333 ret = dma_set_coherent_mask(dev: &pdev->dev, DMA_BIT_MASK(64));
1334 if (ret) {
1335 pci_err(pdev, "failed to set 64 bit dma mask, ret = %d\n", ret);
1336 return ret;
1337 }
1338
1339 pci_set_master(dev: pdev);
1340
1341 ret = hisi_ptt_register_irq(hisi_ptt);
1342 if (ret)
1343 return ret;
1344
1345 ret = hisi_ptt_init_ctrls(hisi_ptt);
1346 if (ret) {
1347 pci_err(pdev, "failed to init controls, ret = %d\n", ret);
1348 return ret;
1349 }
1350
1351 ret = hisi_ptt_register_filter_update_notifier(hisi_ptt);
1352 if (ret)
1353 pci_warn(pdev, "failed to register filter update notifier, ret = %d", ret);
1354
1355 ret = hisi_ptt_register_pmu(hisi_ptt);
1356 if (ret) {
1357 pci_err(pdev, "failed to register PMU device, ret = %d", ret);
1358 return ret;
1359 }
1360
1361 ret = hisi_ptt_init_filter_attributes(hisi_ptt);
1362 if (ret) {
1363 pci_err(pdev, "failed to init sysfs filter attributes, ret = %d", ret);
1364 return ret;
1365 }
1366
1367 return 0;
1368}
1369
/* Match table: HiSilicon PTT device (Huawei vendor ID, device 0xa12e) */
static const struct pci_device_id hisi_ptt_id_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12e) },
	{ }
};
MODULE_DEVICE_TABLE(pci, hisi_ptt_id_tbl);
1375
/*
 * No .remove callback: everything acquired in probe is released
 * automatically through devm/pcim managed resources.
 */
static struct pci_driver hisi_ptt_driver = {
	.name = DRV_NAME,
	.id_table = hisi_ptt_id_tbl,
	.probe = hisi_ptt_probe,
};
1381
1382static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
1383{
1384 struct hisi_ptt *hisi_ptt;
1385 struct device *dev;
1386 int target, src;
1387
1388 hisi_ptt = hlist_entry_safe(node, struct hisi_ptt, hotplug_node);
1389 src = hisi_ptt->trace_ctrl.on_cpu;
1390 dev = hisi_ptt->hisi_ptt_pmu.dev;
1391
1392 if (!hisi_ptt->trace_ctrl.started || src != cpu)
1393 return 0;
1394
1395 target = cpumask_any_but(mask: cpumask_of_node(node: dev_to_node(dev: &hisi_ptt->pdev->dev)), cpu);
1396 if (target >= nr_cpu_ids) {
1397 dev_err(dev, "no available cpu for perf context migration\n");
1398 return 0;
1399 }
1400
1401 perf_pmu_migrate_context(pmu: &hisi_ptt->hisi_ptt_pmu, src_cpu: src, dst_cpu: target);
1402
1403 /*
1404 * Also make sure the interrupt bind to the migrated CPU as well. Warn
1405 * the user on failure here.
1406 */
1407 if (irq_set_affinity(irq: hisi_ptt->trace_irq, cpumask_of(target)))
1408 dev_warn(dev, "failed to set the affinity of trace interrupt\n");
1409
1410 hisi_ptt->trace_ctrl.on_cpu = target;
1411 return 0;
1412}
1413
1414static int __init hisi_ptt_init(void)
1415{
1416 int ret;
1417
1418 ret = cpuhp_setup_state_multi(state: CPUHP_AP_ONLINE_DYN, DRV_NAME, NULL,
1419 teardown: hisi_ptt_cpu_teardown);
1420 if (ret < 0)
1421 return ret;
1422 hisi_ptt_pmu_online = ret;
1423
1424 ret = pci_register_driver(&hisi_ptt_driver);
1425 if (ret)
1426 cpuhp_remove_multi_state(state: hisi_ptt_pmu_online);
1427
1428 return ret;
1429}
1430module_init(hisi_ptt_init);
1431
1432static void __exit hisi_ptt_exit(void)
1433{
1434 pci_unregister_driver(dev: &hisi_ptt_driver);
1435 cpuhp_remove_multi_state(state: hisi_ptt_pmu_online);
1436}
1437module_exit(hisi_ptt_exit);
1438
1439MODULE_LICENSE("GPL");
1440MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
1441MODULE_DESCRIPTION("Driver for HiSilicon PCIe tune and trace device");
1442

/* Source: linux/drivers/hwtracing/ptt/hisi_ptt.c */