// SPDX-License-Identifier: GPL-2.0-only
/*
 * PCIe host controller driver for Xilinx XDMA PCIe Bridge
 *
 * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
 */
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../pci.h"
#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_PCIE_DMA_REG_IDR			0x00000138
#define XILINX_PCIE_DMA_REG_IMR			0x0000013c
#define XILINX_PCIE_DMA_REG_PSCR		0x00000144
#define XILINX_PCIE_DMA_REG_RPSC		0x00000148
#define XILINX_PCIE_DMA_REG_MSIBASE1		0x0000014c
#define XILINX_PCIE_DMA_REG_MSIBASE2		0x00000150
#define XILINX_PCIE_DMA_REG_RPEFR		0x00000154
#define XILINX_PCIE_DMA_REG_IDRN		0x00000160
#define XILINX_PCIE_DMA_REG_IDRN_MASK		0x00000164
#define XILINX_PCIE_DMA_REG_MSI_LOW		0x00000170
#define XILINX_PCIE_DMA_REG_MSI_HI		0x00000174
#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK	0x00000178
#define XILINX_PCIE_DMA_REG_MSI_HI_MASK		0x0000017c

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_PCIE_INTR_IMR_ALL_MASK	\
	(				\
		IMR(LINK_DOWN)	 |	\
		IMR(HOT_RESET)	 |	\
		IMR(CFG_TIMEOUT) |	\
		IMR(CORRECTABLE) |	\
		IMR(NONFATAL)	 |	\
		IMR(FATAL)	 |	\
		IMR(INTX)	 |	\
		IMR(MSI)	 |	\
		IMR(SLV_UNSUPP)	 |	\
		IMR(SLV_UNEXP)	 |	\
		IMR(SLV_COMPL)	 |	\
		IMR(SLV_ERRP)	 |	\
		IMR(SLV_CMPABT)	 |	\
		IMR(SLV_ILLBUR)	 |	\
		IMR(MST_DECERR)	 |	\
		IMR(MST_SLVERR)		\
	)

#define XILINX_PCIE_DMA_IMR_ALL_MASK	0x0ff30fe9
#define XILINX_PCIE_DMA_IDR_ALL_MASK	0xffffffff
#define XILINX_PCIE_DMA_IDRN_MASK	GENMASK(19, 16)

/* Root Port Error Register definitions */
#define XILINX_PCIE_DMA_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_DMA_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_DMA_RPEFR_ALL_MASK	0xffffffff

/* Root Port Interrupt Register definitions */
#define XILINX_PCIE_DMA_IDRN_SHIFT	16

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_DMA_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP	BIT(11)

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS	64

struct xilinx_msi {
	struct irq_domain	*msi_domain;
	unsigned long		*bitmap;
	struct irq_domain	*dev_domain;
	struct mutex		lock;		/* Protect bitmap variable */
	int			irq_msi0;
	int			irq_msi1;
};

/**
 * struct pl_dma_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: IO Mapped Register Base
 * @irq: Interrupt number
 * @cfg: Holds mappings of config space window
 * @phys_reg_base: Physical address of reg base
 * @intx_domain: Legacy IRQ domain pointer
 * @pldma_domain: PL DMA IRQ domain pointer
 * @resources: Bus Resources
 * @msi: MSI information
 * @intx_irq: INTx error interrupt number
 * @lock: Lock protecting shared register access
 */
struct pl_dma_pcie {
	struct device		*dev;
	void __iomem		*reg_base;
	int			irq;
	struct pci_config_window *cfg;
	phys_addr_t		phys_reg_base;
	struct irq_domain	*intx_domain;
	struct irq_domain	*pldma_domain;
	struct list_head	resources;
	struct xilinx_msi	msi;
	int			intx_irq;
	raw_spinlock_t		lock;
};

static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
	return readl(port->reg_base + reg);
}

static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
	writel(val, port->reg_base + reg);
}

static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
{
	return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
		XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? true : false;
}

static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);

	if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
		pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
			   XILINX_PCIE_DMA_REG_RPEFR);
	}
}

static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
					    unsigned int devfn)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!pci_is_root_bus(bus)) {
		/*
		 * Checking whether the link is up is the last line of
		 * defense, and this check is inherently racy by definition.
		 * Sending a PIO request to a downstream device when the link is
		 * down causes an unrecoverable error, and a reset of the entire
		 * PCIe controller will be needed. We can reduce the likelihood
		 * of that unrecoverable error by checking whether the link is
		 * up, but we can't completely prevent it because the link may
		 * go down between the link-up check and the PIO request.
		 */
		if (!xilinx_pl_dma_pcie_link_up(port))
			return false;
	} else if (devfn > 0)
		/* Only one device down on each root port */
		return false;

	return true;
}

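/*
 * Config space is mapped ECAM-style on top of reg_base: PCIE_ECAM_OFFSET()
 * gives each bus 1 MiB and each device/function 4 KiB of the window, i.e.
 * (bus << 20) | (devfn << 12) | where.  For example, bus 1, devfn 0,
 * register 0x10 lands at reg_base + 0x100010.
 */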
static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
		return NULL;

	return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}

/* PCIe operations */
static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
	.pci_ops = {
		.map_bus = xilinx_pl_dma_pcie_map_bus,
		.read	= pci_generic_config_read,
		.write	= pci_generic_config_write,
	}
};

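/*
 * Program the bridge's MSI base registers with its own physical register
 * base so that inbound MSI writes are claimed and decoded by the bridge
 * (decode mode); xilinx_compose_msi_msg() hands the same address out to
 * endpoints.
 */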
static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
{
	phys_addr_t msi_addr = port->phys_reg_base;

	pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
	pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
}

static void xilinx_mask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_unmask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_leg_irq_chip = {
	.name		= "pl_dma:INTx",
	.irq_mask	= xilinx_mask_intx_irq,
	.irq_unmask	= xilinx_unmask_intx_irq,
};

static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
				       unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pl_dma_pcie_intx_map,
};

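/*
 * The bridge reports decoded MSIs in two 32-bit status registers: MSI_LOW
 * covers hwirqs 0-31 and MSI_HI covers hwirqs 32-63 (hence the "bit + 32"
 * in the high handler).  Each handler acknowledges a vector by writing its
 * bit back to the status register and then dispatches the Linux interrupt
 * mapped in msi->dev_domain.
 */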
static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
{
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;
	struct pl_dma_pcie *port = args;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
			bit = bit + 32;
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

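/*
 * Top-level handler for the controller event interrupt: every asserted and
 * unmasked bit in IDR is demultiplexed into port->pldma_domain, where the
 * per-cause handlers registered in xilinx_pl_dma_pcie_setup_irq() run, and
 * the handled bits are then written back to IDR to clear them.
 */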
static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	unsigned long val;
	int i;

	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
	val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->pldma_domain, i);

	pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);

	return IRQ_HANDLED;
}

#define _IC(x, s)	\
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
};

static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
{
	struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->pldma_domain, irq);
	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		xilinx_pl_dma_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static struct irq_chip xilinx_msi_irq_chip = {
	.name		= "pl_dma:PCIe MSI",
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info xilinx_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &xilinx_msi_irq_chip,
};

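/*
 * The MSI doorbell address given to endpoints is the bridge's own register
 * base (see xilinx_pl_dma_pcie_enable_msi()); the MSI data is simply the
 * allocated hwirq, which selects the matching bit in MSI_LOW/MSI_HI.
 */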
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

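/*
 * MSIs are decoded by the bridge and delivered through the shared msi0/msi1
 * parent interrupts, so per-vector CPU affinity cannot be steered from here.
 */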
static int xilinx_msi_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip xilinx_irq_chip = {
	.name			= "pl_dma:MSI",
	.irq_compose_msi_msg	= xilinx_compose_msi_msg,
	.irq_set_affinity	= xilinx_msi_set_affinity,
};

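/*
 * Hardware MSI vectors are handed out from a plain bitmap.  Multi-MSI
 * (MSI_FLAG_MULTI_PCI_MSI) requires a naturally aligned, contiguous block of
 * vectors, which is why allocation uses bitmap_find_free_region() with
 * get_count_order(nr_irqs) instead of claiming bits one at a time.
 */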
static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct pl_dma_pcie *pcie = domain->host_data;
	struct xilinx_msi *msi = &pcie->msi;
	int bit, i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
				      get_count_order(nr_irqs));
	if (bit < 0) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);

	return 0;
}

static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct xilinx_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_release_region(msi->bitmap, data->hwirq,
			      get_count_order(nr_irqs));
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc	= xilinx_irq_domain_alloc,
	.free	= xilinx_irq_domain_free,
};

static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
{
	struct xilinx_msi *msi = &port->msi;

	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (msi->dev_domain) {
		irq_domain_remove(msi->dev_domain);
		msi->dev_domain = NULL;
	}

	if (msi->msi_domain) {
		irq_domain_remove(msi->msi_domain);
		msi->msi_domain = NULL;
	}
}

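/*
 * Two-level MSI setup: dev_domain hands out raw hwirqs (0-63) backed by the
 * bitmap, and the PCI/MSI domain created on top of it via
 * pci_msi_create_irq_domain() is what devices actually allocate from.
 */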
static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct xilinx_msi *msi = &port->msi;
	int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
	struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);

	msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
						&dev_msi_domain_ops, port);
	if (!msi->dev_domain)
		goto out;

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &xilinx_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain)
		goto out;

	mutex_init(&msi->lock);
	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		goto out;

	raw_spin_lock_init(&port->lock);
	xilinx_pl_dma_pcie_enable_msi(port);

	return 0;

out:
	xilinx_pl_dma_pcie_free_irq_domains(port);
	dev_err(dev, "Failed to allocate MSI IRQ domains\n");

	return -ENOMEM;
}

/*
 * INTx error interrupts are Xilinx controller-specific interrupts used to
 * notify the user about errors such as config timeouts, slave unsupported
 * requests, and fatal and non-fatal errors.
 */

static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
{
	unsigned long val;
	int i;
	struct pl_dma_pcie *port = args;

	val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
			pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);
	return IRQ_HANDLED;
}

static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
	.name		= "pl_dma:RC-Event",
	.irq_mask	= xilinx_pl_dma_pcie_mask_event_irq,
	.irq_unmask	= xilinx_pl_dma_pcie_unmask_event_irq,
};

static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
					unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_pl_dma_pcie_event_map,
};

/**
 * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure.
 */
static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx */
	pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
						   &event_domain_ops, port);
	if (!port->pldma_domain) {
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
	if (ret != 0) {
		irq_domain_remove(port->intx_domain);
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
}

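/*
 * Each named entry in intr_cause[] gets its own virtual interrupt in
 * pldma_domain; xilinx_pl_dma_pcie_event_flow(), requested on the real
 * platform IRQ below, demultiplexes into that domain.  XILINX_PCIE_INTR_INTX
 * is wired to xilinx_pl_dma_pcie_intx_flow() so legacy INTA-INTD can be
 * fanned out to intx_domain in turn.
 */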
static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq, err;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->pldma_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq,
				       xilinx_pl_dma_pcie_intr_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->pldma_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
		return err;
	}

	err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
		return err;
	}

	return 0;
}

static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
{
	if (xilinx_pl_dma_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
		   XILINX_PCIE_DMA_IMR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IDR);

	/* Needed for MSI DECODE MODE */
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_HI_MASK);

	/* Set the Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
		   XILINX_PCIE_DMA_REG_RPSC_BEN,
		   XILINX_PCIE_DMA_REG_RPSC);
}

static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (port->msi.irq_msi0 <= 0)
		return port->msi.irq_msi0;

	ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (port->msi.irq_msi1 <= 0)
		return port->msi.irq_msi1;

	ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
				       struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Missing \"reg\" property\n");
		return -ENXIO;
	}
	port->phys_reg_base = res->start;

	port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	port->reg_base = port->cfg->win;

	err = xilinx_request_msi_irq(port);
	if (err) {
		pci_ecam_free(port->cfg);
		return err;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pl_dma_pcie *port;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus)
		return -ENODEV;

	err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	xilinx_pl_dma_pcie_init_port(port);

	err = xilinx_pl_dma_pcie_init_irq_domain(port);
	if (err)
		goto err_irq_domain;

	err = xilinx_pl_dma_pcie_setup_irq(port);
	if (err)
		goto err_host_bridge;

	bridge->sysdata = port;
	bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_pl_dma_pcie_free_irq_domains(port);

err_irq_domain:
	pci_ecam_free(port->cfg);
	return err;
}

static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
	{
		.compatible = "xlnx,xdma-host-3.00",
	},
	{}
};

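/*
 * The match table above corresponds to a devicetree node with (at minimum,
 * judging from what this driver reads): a single "reg" window that serves as
 * both bridge registers and ECAM space, an event interrupt at index 0 plus
 * interrupts named "msi0" and "msi1", and an "interrupt-controller" child
 * node for INTx; the xlnx,xdma-host binding documents the full property set.
 */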
static struct platform_driver xilinx_pl_dma_pcie_driver = {
	.driver = {
		.name = "xilinx-xdma-pcie",
		.of_match_table = xilinx_pl_dma_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pl_dma_pcie_probe,
};

builtin_platform_driver(xilinx_pl_dma_pcie_driver);
