// SPDX-License-Identifier: GPL-2.0-only
/*
 * PCIe host controller driver for Xilinx XDMA PCIe Bridge
 *
 * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
 */
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_PCIE_DMA_REG_IDR			0x00000138
#define XILINX_PCIE_DMA_REG_IMR			0x0000013c
#define XILINX_PCIE_DMA_REG_PSCR		0x00000144
#define XILINX_PCIE_DMA_REG_RPSC		0x00000148
#define XILINX_PCIE_DMA_REG_MSIBASE1		0x0000014c
#define XILINX_PCIE_DMA_REG_MSIBASE2		0x00000150
#define XILINX_PCIE_DMA_REG_RPEFR		0x00000154
#define XILINX_PCIE_DMA_REG_IDRN		0x00000160
#define XILINX_PCIE_DMA_REG_IDRN_MASK		0x00000164
#define XILINX_PCIE_DMA_REG_MSI_LOW		0x00000170
#define XILINX_PCIE_DMA_REG_MSI_HI		0x00000174
#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK	0x00000178
#define XILINX_PCIE_DMA_REG_MSI_HI_MASK		0x0000017c

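/*
 * IMR(x) builds the IDR/IMR bit for one of the shared XILINX_PCIE_INTR_*
 * event numbers defined in pcie-xilinx-common.h.
 */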
#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_PCIE_INTR_IMR_ALL_MASK	\
	(				\
		IMR(LINK_DOWN)	|	\
		IMR(HOT_RESET)	|	\
		IMR(CFG_TIMEOUT)|	\
		IMR(CORRECTABLE)|	\
		IMR(NONFATAL)	|	\
		IMR(FATAL)	|	\
		IMR(INTX)	|	\
		IMR(MSI)	|	\
		IMR(SLV_UNSUPP)	|	\
		IMR(SLV_UNEXP)	|	\
		IMR(SLV_COMPL)	|	\
		IMR(SLV_ERRP)	|	\
		IMR(SLV_CMPABT)	|	\
		IMR(SLV_ILLBUR)	|	\
		IMR(MST_DECERR)	|	\
		IMR(MST_SLVERR)		\
	)

#define XILINX_PCIE_DMA_IMR_ALL_MASK	0x0ff30fe9
#define XILINX_PCIE_DMA_IDR_ALL_MASK	0xffffffff
#define XILINX_PCIE_DMA_IDRN_MASK	GENMASK(19, 16)

/* Root Port Error Register definitions */
#define XILINX_PCIE_DMA_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_DMA_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_DMA_RPEFR_ALL_MASK	0xffffffff

/* Root Port Interrupt Register definitions */
#define XILINX_PCIE_DMA_IDRN_SHIFT	16

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_DMA_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP	BIT(11)
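
/*
 * On the QDMA variant the PCIe bridge register block starts at this offset
 * within the "breg" register space, so pcie_read()/pcie_write() add it to
 * every register address.
 */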
#define QDMA_BRIDGE_BASE_OFF		0xcd8

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS		64
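/*
 * The 64 MSI vectors are reported through two 32-bit decode registers:
 * MSI_LOW carries hwirqs 0-31 and MSI_HI carries hwirqs 32-63, which is why
 * two separate MSI handlers are requested below.
 */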

enum xilinx_pl_dma_version {
	XDMA,
	QDMA,
};

/**
 * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
 * @version: DMA version
 */
struct xilinx_pl_dma_variant {
	enum xilinx_pl_dma_version version;
};

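/**
 * struct xilinx_msi - MSI information
 * @msi_domain: MSI IRQ domain pointer
 * @bitmap: Bitmap of allocated MSI vectors
 * @dev_domain: Parent IRQ domain pointer
 * @lock: Mutex protecting the MSI bitmap
 * @irq_msi0: MSI0 interrupt number (hwirqs 0-31)
 * @irq_msi1: MSI1 interrupt number (hwirqs 32-63)
 */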
struct xilinx_msi {
	struct irq_domain *msi_domain;
	unsigned long *bitmap;
	struct irq_domain *dev_domain;
	struct mutex lock;		/* Protect bitmap variable */
	int irq_msi0;
	int irq_msi1;
};

/**
 * struct pl_dma_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: IO Mapped Register Base
 * @cfg_base: IO Mapped Configuration Base
 * @irq: Interrupt number
 * @cfg: Holds mappings of config space window
 * @phys_reg_base: Physical address of reg base
 * @intx_domain: Legacy IRQ domain pointer
 * @pldma_domain: PL DMA IRQ domain pointer
 * @resources: Bus Resources
 * @msi: MSI information
 * @intx_irq: INTx error interrupt number
 * @lock: Lock protecting shared register access
 * @variant: PL DMA PCIe version check pointer
 */
struct pl_dma_pcie {
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *cfg_base;
	int irq;
	struct pci_config_window *cfg;
	phys_addr_t phys_reg_base;
	struct irq_domain *intx_domain;
	struct irq_domain *pldma_domain;
	struct list_head resources;
	struct xilinx_msi msi;
	int intx_irq;
	raw_spinlock_t lock;
	const struct xilinx_pl_dma_variant *variant;
};

static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
	if (port->variant->version == QDMA)
		return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);

	return readl(port->reg_base + reg);
}

static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
	if (port->variant->version == QDMA)
		writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
	else
		writel(val, port->reg_base + reg);
}

static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
{
	return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
		XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? true : false;
}

static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);

	if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
		pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
			   XILINX_PCIE_DMA_REG_RPEFR);
	}
}

static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
					    unsigned int devfn)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!pci_is_root_bus(bus)) {
		/*
		 * Checking whether the link is up is the last line of
		 * defense, and this check is inherently racy by definition.
		 * Sending a PIO request to a downstream device when the link is
		 * down causes an unrecoverable error, and a reset of the entire
		 * PCIe controller will be needed. We can reduce the likelihood
		 * of that unrecoverable error by checking whether the link is
		 * up, but we can't completely prevent it because the link may
		 * go down between the link-up check and the PIO request.
		 */
		if (!xilinx_pl_dma_pcie_link_up(port))
			return false;
	} else if (devfn > 0)
		/* Only one device down on each root port */
		return false;

	return true;
}

static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
		return NULL;

	if (port->variant->version == QDMA)
		return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);

	return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}

/* PCIe operations */
static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
	.pci_ops = {
		.map_bus = xilinx_pl_dma_pcie_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};

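/*
 * Advertise the bridge's physical register base as the MSI target address.
 * This matches the address handed to endpoints in xilinx_compose_msi_msg();
 * received vectors then show up as pending bits in the MSI_LOW/MSI_HI
 * registers serviced by the two MSI handlers.
 */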
static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
{
	phys_addr_t msi_addr = port->phys_reg_base;

	pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
	pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
}

static void xilinx_mask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_unmask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_leg_irq_chip = {
	.name = "pl_dma:INTx",
	.irq_mask = xilinx_mask_intx_irq,
	.irq_unmask = xilinx_unmask_intx_irq,
};

static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
				       unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pl_dma_pcie_intx_map,
};

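/*
 * MSI_HI reports hwirqs 32-63, so each pending bit is acked in the register
 * first and then offset by 32 before the IRQ domain lookup.
 */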
static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
{
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;
	struct pl_dma_pcie *port = args;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
			bit = bit + 32;
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

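/*
 * Top-level handler for the main event IRQ line: demultiplex every pending,
 * unmasked cause in IDR into the pldma_domain and then write the handled
 * bits back to IDR to clear them.
 */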
static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	unsigned long val;
	int i;

	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
	val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->pldma_domain, i);

	pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);

	return IRQ_HANDLED;
}

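/*
 * Map each hardware event number to a printable name and description.
 * Events without an entry are reported as unknown IRQs.
 */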
#define _IC(x, s) \
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char *sym;
	const char *str;
} intr_cause[32] = {
	_IC(LINK_DOWN, "Link Down"),
	_IC(HOT_RESET, "Hot reset"),
	_IC(CFG_TIMEOUT, "ECAM access timeout"),
	_IC(CORRECTABLE, "Correctable error message"),
	_IC(NONFATAL, "Non fatal error message"),
	_IC(FATAL, "Fatal error message"),
	_IC(SLV_UNSUPP, "Slave unsupported request"),
	_IC(SLV_UNEXP, "Slave unexpected completion"),
	_IC(SLV_COMPL, "Slave completion timeout"),
	_IC(SLV_ERRP, "Slave Error Poison"),
	_IC(SLV_CMPABT, "Slave Completer Abort"),
	_IC(SLV_ILLBUR, "Slave Illegal Burst"),
	_IC(MST_DECERR, "Master decode error"),
	_IC(MST_SLVERR, "Master slave error"),
};

static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
{
	struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->pldma_domain, irq);
	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		xilinx_pl_dma_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static struct irq_chip xilinx_msi_irq_chip = {
	.name = "pl_dma:PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info xilinx_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
	.chip = &xilinx_msi_irq_chip,
};

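/*
 * The MSI message points devices at the bridge register base, with the
 * hwirq number as the payload; that number is what the MSI handlers look up
 * again when the corresponding bit appears in MSI_LOW/MSI_HI.
 */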
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

static struct irq_chip xilinx_irq_chip = {
	.name = "pl_dma:MSI",
	.irq_compose_msi_msg = xilinx_compose_msi_msg,
};

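/*
 * Hardware MSI vectors are handed out as contiguous, power-of-two sized
 * blocks from the bitmap so that multi-MSI allocations stay aligned.
 */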
static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct pl_dma_pcie *pcie = domain->host_data;
	struct xilinx_msi *msi = &pcie->msi;
	int bit, i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
				      get_count_order(nr_irqs));
	if (bit < 0) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);

	return 0;
}

static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct xilinx_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_release_region(msi->bitmap, data->hwirq,
			      get_count_order(nr_irqs));
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc = xilinx_irq_domain_alloc,
	.free = xilinx_irq_domain_free,
};

static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
{
	struct xilinx_msi *msi = &port->msi;

	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (msi->dev_domain) {
		irq_domain_remove(msi->dev_domain);
		msi->dev_domain = NULL;
	}

	if (msi->msi_domain) {
		irq_domain_remove(msi->msi_domain);
		msi->msi_domain = NULL;
	}
}

static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct xilinx_msi *msi = &port->msi;
	int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
	struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node);

	msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS,
						   &dev_msi_domain_ops, port);
	if (!msi->dev_domain)
		goto out;

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &xilinx_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain)
		goto out;

	mutex_init(&msi->lock);
	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		goto out;

	raw_spin_lock_init(&port->lock);
	xilinx_pl_dma_pcie_enable_msi(port);

	return 0;

out:
	xilinx_pl_dma_pcie_free_irq_domains(port);
	dev_err(dev, "Failed to allocate MSI IRQ domains\n");

	return -ENOMEM;
}

/*
 * INTx error interrupts are Xilinx controller-specific interrupts, used to
 * notify the user about errors such as cfg timeout, slave unsupported
 * requests, fatal and non-fatal errors, etc.
 */

static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
{
	unsigned long val;
	int i;
	struct pl_dma_pcie *port = args;

	val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
			pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	return IRQ_HANDLED;
}

static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
	.name = "pl_dma:RC-Event",
	.irq_mask = xilinx_pl_dma_pcie_mask_event_irq,
	.irq_unmask = xilinx_pl_dma_pcie_unmask_event_irq,
};

static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
					unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_pl_dma_pcie_event_map,
};

/**
 * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure.
 */
static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx */
	pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
						      &event_domain_ops, port);
	if (!port->pldma_domain)
		return -ENOMEM;

	irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
						     &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
	if (ret != 0) {
		irq_domain_remove(port->intx_domain);
		return -ENOMEM;
	}

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
}

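/*
 * Hook up the three levels of interrupt delivery: one IRQ per named error
 * event, the INTx demultiplexer, and the top-level event line itself.
 */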
static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq, err;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->pldma_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq,
				       xilinx_pl_dma_pcie_intr_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->pldma_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
		return err;
	}

	err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
		return err;
	}

	return 0;
}

static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
{
	if (xilinx_pl_dma_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
		   XILINX_PCIE_DMA_IMR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IDR);

	/* Needed for MSI DECODE MODE */
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_HI_MASK);

	/* Set the Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
		   XILINX_PCIE_DMA_REG_RPSC_BEN,
		   XILINX_PCIE_DMA_REG_RPSC);
}

static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (port->msi.irq_msi0 <= 0)
		return port->msi.irq_msi0;

	ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (port->msi.irq_msi1 <= 0)
		return port->msi.irq_msi1;

	ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	return 0;
}

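/*
 * For the XDMA variant the bridge registers share the ECAM window mapped
 * from "reg"; the QDMA variant keeps configuration space in the ECAM window
 * and exposes the bridge registers through the separate "breg" region.
 */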
static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
				       struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Missing \"reg\" property\n");
		return -ENXIO;
	}
	port->phys_reg_base = res->start;

	port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	port->reg_base = port->cfg->win;

	if (port->variant->version == QDMA) {
		port->cfg_base = port->cfg->win;
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
		port->reg_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(port->reg_base))
			return PTR_ERR(port->reg_base);
		port->phys_reg_base = res->start;
	}

	err = xilinx_request_msi_irq(port);
	if (err) {
		pci_ecam_free(port->cfg);
		return err;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pl_dma_pcie *port;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus)
		return -ENODEV;

	port->variant = of_device_get_match_data(dev);

	err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	xilinx_pl_dma_pcie_init_port(port);

	err = xilinx_pl_dma_pcie_init_irq_domain(port);
	if (err)
		goto err_irq_domain;

	err = xilinx_pl_dma_pcie_setup_irq(port);
	if (err)
		goto err_host_bridge;

	bridge->sysdata = port;
	bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_pl_dma_pcie_free_irq_domains(port);

err_irq_domain:
	pci_ecam_free(port->cfg);
	return err;
}

static const struct xilinx_pl_dma_variant xdma_host = {
	.version = XDMA,
};

static const struct xilinx_pl_dma_variant qdma_host = {
	.version = QDMA,
};

static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
	{
		.compatible = "xlnx,xdma-host-3.00",
		.data = &xdma_host,
	},
	{
		.compatible = "xlnx,qdma-host-3.00",
		.data = &qdma_host,
	},
	{}
};

static struct platform_driver xilinx_pl_dma_pcie_driver = {
	.driver = {
		.name = "xilinx-xdma-pcie",
		.of_match_table = xilinx_pl_dma_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pl_dma_pcie_probe,
};

builtin_platform_driver(xilinx_pl_dma_pcie_driver);