// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host bridge driver for Apple system-on-chips.
 *
 * The HW is ECAM compliant, so once the controller is initialized,
 * the driver mostly deals with MSI mapping and handling of per-port
 * interrupts (INTx, management and error signals).
 *
 * Initialization requires enabling power and clocks, along with a
 * number of register pokes.
 *
 * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2021 Google LLC
 * Copyright (C) 2021 Corellium LLC
 * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
 *
 * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Author: Marc Zyngier <maz@kernel.org>
 */

#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>

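/* Shared core/root-complex registers, accessed through the apple_pcie base mapping */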
#define CORE_RC_PHYIF_CTL		0x00024
#define CORE_RC_PHYIF_CTL_RUN		BIT(0)
#define CORE_RC_PHYIF_STAT		0x00028
#define CORE_RC_PHYIF_STAT_REFCLK	BIT(4)
#define CORE_RC_CTL			0x00050
#define CORE_RC_CTL_RUN			BIT(0)
#define CORE_RC_STAT			0x00058
#define CORE_RC_STAT_READY		BIT(0)
#define CORE_FABRIC_STAT		0x04000
#define CORE_FABRIC_STAT_MASK		0x001F001F
#define CORE_LANE_CFG(port)		(0x84000 + 0x4000 * (port))
#define CORE_LANE_CFG_REFCLK0REQ	BIT(0)
#define CORE_LANE_CFG_REFCLK1REQ	BIT(1)
#define CORE_LANE_CFG_REFCLK0ACK	BIT(2)
#define CORE_LANE_CFG_REFCLK1ACK	BIT(3)
#define CORE_LANE_CFG_REFCLKEN		(BIT(9) | BIT(10))
#define CORE_LANE_CTL(port)		(0x84004 + 0x4000 * (port))
#define CORE_LANE_CTL_CFGACC		BIT(15)

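/* Per-port registers, accessed through each apple_pcie_port base mapping */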
#define PORT_LTSSMCTL			0x00080
#define PORT_LTSSMCTL_START		BIT(0)
#define PORT_INTSTAT			0x00100
#define PORT_INT_TUNNEL_ERR		31
#define PORT_INT_CPL_TIMEOUT		23
#define PORT_INT_RID2SID_MAPERR		22
#define PORT_INT_CPL_ABORT		21
#define PORT_INT_MSI_BAD_DATA		19
#define PORT_INT_MSI_ERR		18
#define PORT_INT_REQADDR_GT32		17
#define PORT_INT_AF_TIMEOUT		15
#define PORT_INT_LINK_DOWN		14
#define PORT_INT_LINK_UP		12
#define PORT_INT_LINK_BWMGMT		11
#define PORT_INT_AER_MASK		(15 << 4)
#define PORT_INT_PORT_ERR		4
#define PORT_INT_INTx(i)		i
#define PORT_INT_INTx_MASK		15
#define PORT_INTMSK			0x00104
#define PORT_INTMSKSET			0x00108
#define PORT_INTMSKCLR			0x0010c
#define PORT_MSICFG			0x00124
#define PORT_MSICFG_EN			BIT(0)
#define PORT_MSICFG_L2MSINUM_SHIFT	4
#define PORT_MSIBASE			0x00128
#define PORT_MSIBASE_1_SHIFT		16
#define PORT_MSIADDR			0x00168
#define PORT_LINKSTS			0x00208
#define PORT_LINKSTS_UP			BIT(0)
#define PORT_LINKSTS_BUSY		BIT(2)
#define PORT_LINKCMDSTS			0x00210
#define PORT_OUTS_NPREQS		0x00284
#define PORT_OUTS_NPREQS_REQ		BIT(24)
#define PORT_OUTS_NPREQS_CPL		BIT(16)
#define PORT_RXWR_FIFO			0x00288
#define PORT_RXWR_FIFO_HDR		GENMASK(15, 10)
#define PORT_RXWR_FIFO_DATA		GENMASK(9, 0)
#define PORT_RXRD_FIFO			0x0028C
#define PORT_RXRD_FIFO_REQ		GENMASK(6, 0)
#define PORT_OUTS_CPLS			0x00290
#define PORT_OUTS_CPLS_SHRD		GENMASK(14, 8)
#define PORT_OUTS_CPLS_WAIT		GENMASK(6, 0)
#define PORT_APPCLK			0x00800
#define PORT_APPCLK_EN			BIT(0)
#define PORT_APPCLK_CGDIS		BIT(8)
#define PORT_STATUS			0x00804
#define PORT_STATUS_READY		BIT(0)
#define PORT_REFCLK			0x00810
#define PORT_REFCLK_EN			BIT(0)
#define PORT_REFCLK_CGDIS		BIT(8)
#define PORT_PERST			0x00814
#define PORT_PERST_OFF			BIT(0)
#define PORT_RID2SID(i16)		(0x00828 + 4 * (i16))
#define PORT_RID2SID_VALID		BIT(31)
#define PORT_RID2SID_SID_SHIFT		16
#define PORT_RID2SID_BUS_SHIFT		8
#define PORT_RID2SID_DEV_SHIFT		3
#define PORT_RID2SID_FUNC_SHIFT		0
#define PORT_OUTS_PREQS_HDR		0x00980
#define PORT_OUTS_PREQS_HDR_MASK	GENMASK(9, 0)
#define PORT_OUTS_PREQS_DATA		0x00984
#define PORT_OUTS_PREQS_DATA_MASK	GENMASK(15, 0)
#define PORT_TUNCTRL			0x00988
#define PORT_TUNCTRL_PERST_ON		BIT(0)
#define PORT_TUNCTRL_PERST_ACK_REQ	BIT(1)
#define PORT_TUNSTAT			0x0098c
#define PORT_TUNSTAT_PERST_ON		BIT(0)
#define PORT_TUNSTAT_PERST_ACK_PEND	BIT(1)
#define PORT_PREFMEM_ENABLE		0x00994

#define MAX_RID2SID			64

/*
 * The doorbell address is set to 0xfffff000, which by convention
 * matches what macOS does, and it is possible to use any other
 * address (in the bottom 4GB, as the base register is only 32 bits
 * wide). However, it has to be excluded from the IOVA range, and the
 * DART IOMMU driver has to know about it.
 */
#define DOORBELL_ADDR		CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR

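/*
 * Per-controller state shared by all root ports: the core register
 * block, the MSI vector bitmap and IRQ domain, and the list of ports.
 */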
struct apple_pcie {
	struct mutex		lock;
	struct device		*dev;
	void __iomem		*base;
	struct irq_domain	*domain;
	unsigned long		*bitmap;
	struct list_head	ports;
	struct completion	event;
	struct irq_fwspec	fwspec;
	u32			nvecs;
};

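/*
 * Per-root-port state: the port register block, the per-port interrupt
 * domain, and the RID-to-SID mapping table allocator.
 */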
struct apple_pcie_port {
	struct apple_pcie	*pcie;
	struct device_node	*np;
	void __iomem		*base;
	struct irq_domain	*domain;
	struct list_head	entry;
	DECLARE_BITMAP(sid_map, MAX_RID2SID);
	int			sid_map_sz;
	int			idx;
};

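/* Relaxed read-modify-write helpers for MMIO registers */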
static void rmw_set(u32 set, void __iomem *addr)
{
	writel_relaxed(readl_relaxed(addr) | set, addr);
}

static void rmw_clear(u32 clr, void __iomem *addr)
{
	writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}

static void apple_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void apple_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip apple_msi_top_chip = {
	.name			= "PCIe MSI",
	.irq_mask		= apple_msi_top_irq_mask,
	.irq_unmask		= apple_msi_top_irq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
};

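/*
 * An MSI is delivered as a 32-bit write of the allocated vector number
 * (hwirq) to the fixed doorbell address; the controller turns such
 * writes into the wired parent interrupts described by "msi-ranges"
 * (see apple_pcie_port_setup_irq() and apple_msi_init()).
 */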
static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(DOORBELL_ADDR);
	msg->address_lo = lower_32_bits(DOORBELL_ADDR);
	msg->data = data->hwirq;
}

static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};

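/*
 * MSI vectors are handed out as a contiguous block from the bitmap and
 * mapped 1:1 onto the parent wired interrupts, by offsetting the
 * interrupt number in the "msi-ranges" fwspec by the allocated hwirq.
 */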
static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct apple_pcie *pcie = domain->host_data;
	struct irq_fwspec fwspec = pcie->fwspec;
	unsigned int i;
	int ret, hwirq;

	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	fwspec.param[fwspec.param_count - 2] += hwirq;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &apple_msi_bottom_chip,
					      domain->host_data);
	}

	return 0;
}

static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct apple_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);
}

static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};

static struct msi_domain_info apple_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.chip	= &apple_msi_top_chip,
};

static void apple_port_irq_mask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
}

static void apple_port_irq_unmask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
}

static bool hwirq_is_intx(unsigned int hwirq)
{
	return BIT(hwirq) & PORT_INT_INTx_MASK;
}

static void apple_port_irq_ack(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	if (!hwirq_is_intx(data->hwirq))
		writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
}

static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
{
	/*
	 * It doesn't seem that there is any way to configure the
	 * trigger, so assume INTx have to be level (as per the spec),
	 * and the rest is edge (which looks likely).
	 */
	if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
		return -EINVAL;

	irqd_set_trigger_type(data, type);
	return 0;
}

static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};

static int apple_port_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	struct apple_pcie_port *port = domain->host_data;
	struct irq_fwspec *fwspec = args;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_flow_handler_t flow = handle_edge_irq;
		unsigned int type = IRQ_TYPE_EDGE_RISING;

		if (hwirq_is_intx(fwspec->param[0] + i)) {
			flow = handle_level_irq;
			type = IRQ_TYPE_LEVEL_HIGH;
		}

		irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
				    &apple_port_irqchip, port, flow,
				    NULL, NULL);

		irq_set_irq_type(virq + i, type);
	}

	return 0;
}

static void apple_port_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};

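/*
 * Chained handler for the per-port interrupt line: demultiplex
 * PORT_INTSTAT and hand each pending bit to the per-port IRQ domain.
 */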
static void apple_port_irq_handler(struct irq_desc *desc)
{
	struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long stat;
	int i;

	chained_irq_enter(chip, desc);

	stat = readl_relaxed(port->base + PORT_INTSTAT);

	for_each_set_bit(i, &stat, 32)
		generic_handle_domain_irq(port->domain, i);

	chained_irq_exit(chip, desc);
}

static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
	struct fwnode_handle *fwnode = &port->np->fwnode;
	unsigned int irq;

	/* FIXME: consider moving each interrupt under each port */
	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
				   port->idx);
	if (!irq)
		return -ENXIO;

	port->domain = irq_domain_create_linear(fwnode, 32,
						&apple_port_irq_domain_ops,
						port);
	if (!port->domain)
		return -ENOMEM;

	/* Disable all interrupts */
	writel_relaxed(~0, port->base + PORT_INTMSKSET);
	writel_relaxed(~0, port->base + PORT_INTSTAT);

	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);

	/* Configure MSI base address */
	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
	writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);

	/* Enable MSIs, shared between all ports */
	writel_relaxed(0, port->base + PORT_MSIBASE);
	writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
		       PORT_MSICFG_EN, port->base + PORT_MSICFG);

	return 0;
}

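/*
 * Handler for the link management interrupts registered in
 * apple_pcie_port_register_irqs(). A link-up event completes
 * pcie->event, which apple_pcie_setup_port() waits on after starting
 * link training.
 */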
static irqreturn_t apple_pcie_port_irq(int irq, void *data)
{
	struct apple_pcie_port *port = data;
	unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;

	switch (hwirq) {
	case PORT_INT_LINK_UP:
		dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
				     port->np);
		complete_all(&port->pcie->event);
		break;
	case PORT_INT_LINK_DOWN:
		dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
				     port->np);
		break;
	default:
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
{
	static struct {
		unsigned int hwirq;
		const char *name;
	} port_irqs[] = {
		{ PORT_INT_LINK_UP,   "Link up",   },
		{ PORT_INT_LINK_DOWN, "Link down", },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
		struct irq_fwspec fwspec = {
			.fwnode		= &port->np->fwnode,
			.param_count	= 1,
			.param		= {
				[0]	= port_irqs[i].hwirq,
			},
		};
		unsigned int irq;
		int ret;

		irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
					    &fwspec);
		if (WARN_ON(!irq))
			continue;

		ret = request_irq(irq, apple_pcie_port_irq, 0,
				  port_irqs[i].name, port);
		WARN_ON(ret);
	}

	return 0;
}

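/*
 * Reference clock bring-up for a port: wait for the PHY refclk, then,
 * with lane configuration access enabled, perform the REFCLK0/REFCLK1
 * request/ack handshake before enabling the clock at both the core
 * lane and port level.
 */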
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
					 stat & CORE_RC_PHYIF_STAT_REFCLK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
	rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));

	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK1ACK,
					 100, 50000);

	if (res < 0)
		return res;

	rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));

	rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
	rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);

	return 0;
}

static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
				    int idx, u32 val)
{
	writel_relaxed(val, port->base + PORT_RID2SID(idx));
	/* Read back to ensure completion of the write */
	return readl_relaxed(port->base + PORT_RID2SID(idx));
}

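/*
 * Bring up a single root port: assert PERST#, set up the reference
 * clock, deassert PERST# and wait for the port to report ready, ungate
 * the clocks, set up per-port interrupts and MSIs, size and clear the
 * RID-to-SID table, then start link training and wait for link-up.
 */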
static int apple_pcie_setup_port(struct apple_pcie *pcie,
				 struct device_node *np)
{
	struct platform_device *platform = to_platform_device(pcie->dev);
	struct apple_pcie_port *port;
	struct gpio_desc *reset;
	u32 stat, idx;
	int ret, i;

	reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
				      GPIOD_OUT_LOW, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = of_property_read_u32_index(np, "reg", 0, &idx);
	if (ret)
		return ret;

	/* Use the first reg entry to work out the port index */
	port->idx = idx >> 11;
	port->pcie = pcie;
	port->np = np;

	port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

	/* Assert PERST# before setting up the clock */
	gpiod_set_value(reset, 1);

	ret = apple_pcie_setup_refclk(pcie, port);
	if (ret < 0)
		return ret;

	/* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
	usleep_range(100, 200);

	/* Deassert PERST# */
	rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
	gpiod_set_value(reset, 0);

	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
	msleep(100);

	ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
					 stat & PORT_STATUS_READY, 100, 250000);
	if (ret < 0) {
		dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
		return ret;
	}

	rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
	rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);

	ret = apple_pcie_port_setup_irq(port);
	if (ret)
		return ret;

	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
	for (i = 0; i < MAX_RID2SID; i++) {
		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
			break;
		apple_pcie_rid2sid_write(port, i, 0);
	}

	dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);

	port->sid_map_sz = i;

	list_add_tail(&port->entry, &pcie->ports);
	init_completion(&pcie->event);

	ret = apple_pcie_port_register_irqs(port);
	WARN_ON(ret);

	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);

	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);

	return 0;
}

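/*
 * Parse "msi-ranges" to find the parent interrupt controller, the
 * first wired interrupt backing the MSI doorbell and the number of
 * available vectors, then stack the MSI IRQ domains on top of the
 * parent wired domain.
 */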
static int apple_msi_init(struct apple_pcie *pcie)
{
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct of_phandle_args args = {};
	struct irq_domain *parent;
	int ret;

	ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
					 "#interrupt-cells", 0, &args);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
					 args.args_count + 1, &pcie->nvecs);
	if (ret)
		return ret;

	of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
				  &pcie->fwspec);

	pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
	if (!pcie->bitmap)
		return -ENOMEM;

	parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
	if (!parent) {
		dev_err(pcie->dev, "failed to find parent domain\n");
		return -ENXIO;
	}

	parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
					     &apple_msi_domain_ops, pcie);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
						 parent);
	if (!pcie->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}

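/*
 * Resolve the apple_pcie_port a device sits behind by walking up to
 * its root port and matching the root port's slot number against the
 * port index.
 */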
static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
{
	struct pci_config_window *cfg = pdev->sysdata;
	struct apple_pcie *pcie = cfg->priv;
	struct pci_dev *port_pdev;
	struct apple_pcie_port *port;

	/* Find the root port this device is on */
	port_pdev = pcie_find_root_port(pdev);

	/* If finding the port itself, nothing to do */
	if (WARN_ON(!port_pdev) || pdev == port_pdev)
		return NULL;

	list_for_each_entry(port, &pcie->ports, entry) {
		if (port->idx == PCI_SLOT(port_pdev->devfn))
			return port;
	}

	return NULL;
}

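/*
 * Install a Requester-ID to Stream-ID translation entry for a newly
 * added device, using the SID described by the "iommu-map" DT property
 * and a free slot in the port's RID2SID table.
 */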
static int apple_pcie_add_device(struct apple_pcie_port *port,
				 struct pci_dev *pdev)
{
	u32 sid, rid = pci_dev_id(pdev);
	int idx, err;

	dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
		pci_name(pdev->bus->self), port->idx);

	err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
			"iommu-map-mask", NULL, &sid);
	if (err)
		return err;

	mutex_lock(&port->pcie->lock);

	idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
	if (idx >= 0) {
		apple_pcie_rid2sid_write(port, idx,
					 PORT_RID2SID_VALID |
					 (sid << PORT_RID2SID_SID_SHIFT) | rid);

		dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
			rid, sid, idx);
	}

	mutex_unlock(&port->pcie->lock);

	return idx >= 0 ? 0 : -ENOSPC;
}

static void apple_pcie_release_device(struct apple_pcie_port *port,
				      struct pci_dev *pdev)
{
	u32 rid = pci_dev_id(pdev);
	int idx;

	mutex_lock(&port->pcie->lock);

	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
		u32 val;

		val = readl_relaxed(port->base + PORT_RID2SID(idx));
		if ((val & 0xffff) == rid) {
			apple_pcie_rid2sid_write(port, idx, 0);
			bitmap_release_region(port->sid_map, idx, 0);
			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
			break;
		}
	}

	mutex_unlock(&port->pcie->lock);
}

static int apple_pcie_bus_notifier(struct notifier_block *nb,
				   unsigned long action,
				   void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct apple_pcie_port *port;
	int err;

	/*
	 * This is a bit ugly. We assume that if we get notified for
	 * any PCI device, we must be in charge of it, and that there
	 * is no other PCI controller in the whole system. It probably
	 * holds for now, but who knows for how long?
	 */
	port = apple_pcie_get_port(pdev);
	if (!port)
		return NOTIFY_DONE;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		err = apple_pcie_add_device(port, pdev);
		if (err)
			return notifier_from_errno(err);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		apple_pcie_release_device(port, pdev);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block apple_pcie_nb = {
	.notifier_call = apple_pcie_bus_notifier,
};

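/*
 * ECAM init callback: map the shared core registers, set up the MSI
 * machinery and bring up every root port described in the device tree.
 */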
static int apple_pcie_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *platform = to_platform_device(dev);
	struct device_node *of_port;
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;

	mutex_init(&pcie->lock);

	pcie->base = devm_platform_ioremap_resource(platform, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	cfg->priv = pcie;
	INIT_LIST_HEAD(&pcie->ports);

	ret = apple_msi_init(pcie);
	if (ret)
		return ret;

	for_each_child_of_node(dev->of_node, of_port) {
		ret = apple_pcie_setup_port(pcie, of_port);
		if (ret) {
			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
			of_node_put(of_port);
			return ret;
		}
	}

	return 0;
}

static int apple_pcie_probe(struct platform_device *pdev)
{
	int ret;

	ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
	if (ret)
		return ret;

	ret = pci_host_common_probe(pdev);
	if (ret)
		bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);

	return ret;
}

static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);

static struct platform_driver apple_pcie_driver = {
	.probe	= apple_pcie_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);

MODULE_LICENSE("GPL v2");
