// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe endpoint driver for Renesas R-Car SoCs
 * Copyright (c) 2020 Renesas Electronics Europe GmbH
 *
 * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 */

#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "pcie-rcar.h"

#define RCAR_EPC_MAX_FUNCTIONS	1

/* Structure representing the PCIe interface */
struct rcar_pcie_endpoint {
	struct rcar_pcie	pcie;
	phys_addr_t		*ob_mapped_addr;
	struct pci_epc_mem_window *ob_window;
	u8			max_functions;
	unsigned int		bar_to_atu[MAX_NR_INBOUND_MAPS];
	unsigned long		*ib_window_map;
	u32			num_ib_windows;
	u32			num_ob_windows;
};

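/* Put the controller into endpoint mode and program its default config space */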
static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
{
	u32 val;

	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set endpoint mode */
	rcar_pci_write_reg(pcie, 0, PCIEMSR);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
		   PCI_HEADER_TYPE_NORMAL);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	val = rcar_pci_read_reg(pcie, EXPCAP(1));
	/* device supports fixed 128 bytes MPSS */
	val &= ~GENMASK(2, 0);
	rcar_pci_write_reg(pcie, val, EXPCAP(1));

	val = rcar_pci_read_reg(pcie, EXPCAP(2));
	/* read requests size 128 bytes */
	val &= ~GENMASK(14, 12);
	/* payload size 128 bytes */
	val &= ~GENMASK(7, 5);
	rcar_pci_write_reg(pcie, val, EXPCAP(2));

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* flush modifications */
	wmb();
}

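/* Return the index of the outbound window whose base matches @addr, or -EINVAL */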
static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
				   phys_addr_t addr)
{
	int i;

	for (i = 0; i < ep->num_ob_windows; i++)
		if (ep->ob_window[i].phys_base == addr)
			return i;

	return -EINVAL;
}

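/* Collect the "memoryN" platform resources and register them as outbound windows */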
static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
					   struct platform_device *pdev)
{
	struct rcar_pcie *pcie = &ep->pcie;
	char outbound_name[10];
	struct resource *res;
	unsigned int i = 0;

	ep->num_ob_windows = 0;
	for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
		sprintf(outbound_name, "memory%u", i);
		res = platform_get_resource_byname(pdev,
						   IORESOURCE_MEM,
						   outbound_name);
		if (!res) {
			dev_err(pcie->dev, "missing outbound window %u\n", i);
			return -EINVAL;
		}
		if (!devm_request_mem_region(&pdev->dev, res->start,
					     resource_size(res),
					     outbound_name)) {
			dev_err(pcie->dev, "Cannot request memory region %s.\n",
				outbound_name);
			return -EIO;
		}

		ep->ob_window[i].phys_base = res->start;
		ep->ob_window[i].size = resource_size(res);
		/* controller doesn't support multiple allocation
		 * from same window, so set page_size to window size
		 */
		ep->ob_window[i].page_size = resource_size(res);
	}
	ep->num_ob_windows = i;

	return 0;
}

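/* Map the controller registers and gather window and function data from DT */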
static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
				  struct platform_device *pdev)
{
	struct rcar_pcie *pcie = &ep->pcie;
	struct pci_epc_mem_window *window;
	struct device *dev = pcie->dev;
	struct resource res;
	int err;

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;
	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
				     sizeof(*window), GFP_KERNEL);
	if (!ep->ob_window)
		return -ENOMEM;

	err = rcar_pcie_parse_outbound_ranges(ep, pdev);
	if (err < 0)
		return err;

	err = of_property_read_u8(dev->of_node, "max-functions",
				  &ep->max_functions);
	if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
		ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;

	return 0;
}

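/* Program the standard configuration space header from the EPF header */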
static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	if (!fn)
		val = hdr->vendorid;
	else
		val = rcar_pci_read_reg(pcie, IDSETR0);
	val |= hdr->deviceid << 16;
	rcar_pci_write_reg(pcie, val, IDSETR0);

	val = hdr->revid;
	val |= hdr->progif_code << 8;
	val |= hdr->subclass_code << 16;
	val |= hdr->baseclass_code << 24;
	rcar_pci_write_reg(pcie, val, IDSETR1);

	if (!fn)
		val = hdr->subsys_vendor_id;
	else
		val = rcar_pci_read_reg(pcie, SUBIDSETR);
	val |= hdr->subsys_id << 16;
	rcar_pci_write_reg(pcie, val, SUBIDSETR);

	if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
		return -EINVAL;
	val = rcar_pci_read_reg(pcie, PCICONF(15));
	val |= (hdr->interrupt_pin << 8);
	rcar_pci_write_reg(pcie, val, PCICONF(15));

	return 0;
}

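/* Claim a pair of inbound windows and map a 64-bit BAR of the requested size */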
static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				struct pci_epf_bar *epf_bar)
{
	int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	u64 size = 1ULL << fls64(epf_bar->size - 1);
	dma_addr_t cpu_addr = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	struct rcar_pcie *pcie = &ep->pcie;
	u32 mask;
	int idx;
	int err;

	idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (idx >= ep->num_ib_windows) {
		dev_err(pcie->dev, "no free inbound window\n");
		return -EINVAL;
	}

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
		flags |= IO_SPACE;

	ep->bar_to_atu[bar] = idx;
	/* use 64-bit BARs */
	set_bit(idx, ep->ib_window_map);
	set_bit(idx + 1, ep->ib_window_map);

	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(size, alignment);
	}

	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	rcar_pcie_set_inbound(pcie, cpu_addr,
			      0x0, mask | flags, idx, false);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err) {
		dev_err(pcie->dev, "phy not ready\n");
		return -EINVAL;
	}

	return 0;
}

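/* Tear down the inbound mapping for @epf_bar and release its window pair */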
static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar];

	rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);

	clear_bit(atu_index, ep->ib_window_map);
	clear_bit(atu_index + 1, ep->ib_window_map);
}

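/* Advertise the supported number of MSI vectors (log2 encoded) in the MSI capability */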
static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
				u8 interrupts)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 flags;

	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
	flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
	rcar_pci_write_reg(pcie, flags, MSICAP(fn));

	return 0;
}

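/* Return the number of MSI vectors enabled by the host (log2 encoded), if MSI is on */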
static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 flags;

	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
	if (!(flags & MSICAP0_MSIE))
		return -EINVAL;

	return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}

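/* Map a PCI address range onto the fixed outbound window that matches @addr */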
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	struct resource_entry win;
	struct resource res;
	int window;
	int err;

	/* check if we have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err) {
		dev_err(pcie->dev, "link not up\n");
		return err;
	}

	window = rcar_pcie_ep_get_window(ep, addr);
	if (window < 0) {
		dev_err(pcie->dev, "failed to get corresponding window\n");
		return -EINVAL;
	}

	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	res.start = pci_addr;
	res.end = pci_addr + size - 1;
	res.flags = IORESOURCE_MEM;
	win.res = &res;

	rcar_pcie_set_outbound(pcie, window, &win);

	ep->ob_mapped_addr[window] = addr;

	return 0;
}

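/* Release the outbound window that was mapped to @addr, if any */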
static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct resource_entry win;
	struct resource res;
	int idx;

	for (idx = 0; idx < ep->num_ob_windows; idx++)
		if (ep->ob_mapped_addr[idx] == addr)
			break;

	if (idx >= ep->num_ob_windows)
		return;

	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	win.res = &res;
	rcar_pcie_set_outbound(&ep->pcie, idx, &win);

	ep->ob_mapped_addr[idx] = 0;
}

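/* Pulse the INTx line, provided MSI is disabled and INTx is neither masked nor already asserted */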
static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
				    u8 fn, u8 intx)
{
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	if ((val & PCI_MSI_FLAGS_ENABLE)) {
		dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCICONF(1));
	if ((val & INTDIS)) {
		dev_err(pcie->dev, "INTx message transmission is disabled\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	if ((val & ASTINTX)) {
		dev_err(pcie->dev, "INTx is already asserted\n");
		return -EINVAL;
	}

	val |= ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);
	usleep_range(1000, 1001);
	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	val &= ~ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);

	return 0;
}

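/* Send an MSI, provided MSI is enabled and interrupt_num is within the enabled vector count */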
static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
				   u8 fn, u8 interrupt_num)
{
	u16 msi_count;
	u32 val;

	/* Check MSI enable bit */
	val = rcar_pci_read_reg(pcie, MSICAP(fn));
	if (!(val & MSICAP0_MSIE))
		return -EINVAL;

	/* Get MSI numbers from MME */
	msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
	msi_count = 1 << msi_count;

	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);

	return 0;
}

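/* pci_epc_ops.raise_irq callback: dispatch to the INTx or MSI helper */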
static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  unsigned int type, u16 interrupt_num)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_IRQ_INTX:
		return rcar_pcie_ep_assert_intx(ep, fn, 0);

	case PCI_IRQ_MSI:
		return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);

	default:
		return -EINVAL;
	}
}

static int rcar_pcie_ep_start(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);

	return 0;
}

static void rcar_pcie_ep_stop(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
}

static const struct pci_epc_features rcar_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
			.only_64bit = true, },
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256,
			.only_64bit = true, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256,
			.only_64bit = true, },
	.bar[BAR_5] = { .type = BAR_RESERVED, },
};

static const struct pci_epc_features*
rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	return &rcar_pcie_epc_features;
}

static const struct pci_epc_ops rcar_pcie_epc_ops = {
	.write_header = rcar_pcie_ep_write_header,
	.set_bar = rcar_pcie_ep_set_bar,
	.clear_bar = rcar_pcie_ep_clear_bar,
	.set_msi = rcar_pcie_ep_set_msi,
	.get_msi = rcar_pcie_ep_get_msi,
	.map_addr = rcar_pcie_ep_map_addr,
	.unmap_addr = rcar_pcie_ep_unmap_addr,
	.raise_irq = rcar_pcie_ep_raise_irq,
	.start = rcar_pcie_ep_start,
	.stop = rcar_pcie_ep_stop,
	.get_features = rcar_pcie_ep_get_features,
};

static const struct of_device_id rcar_pcie_ep_of_match[] = {
	{ .compatible = "renesas,r8a774c0-pcie-ep", },
	{ .compatible = "renesas,rcar-gen3-pcie-ep" },
	{ },
};

static int rcar_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_endpoint *ep;
	struct rcar_pcie *pcie;
	struct pci_epc *epc;
	int err;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->dev = dev;

	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	err = rcar_pcie_ep_get_pdata(ep, pdev);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
	ep->ib_window_map =
			devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
				     sizeof(long), GFP_KERNEL);
	if (!ep->ib_window_map) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for inbound map\n");
		goto err_pm_put;
	}

	ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
					  sizeof(*ep->ob_mapped_addr),
					  GFP_KERNEL);
	if (!ep->ob_mapped_addr) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
		goto err_pm_put;
	}

	epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		err = PTR_ERR(epc);
		goto err_pm_put;
	}

	epc->max_functions = ep->max_functions;
	epc_set_drvdata(epc, ep);

	rcar_pcie_ep_hw_init(pcie);

	err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
	if (err < 0) {
		dev_err(dev, "failed to initialize the epc memory space\n");
		goto err_pm_put;
	}

	return 0;

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);

	return err;
}

static struct platform_driver rcar_pcie_ep_driver = {
	.driver = {
		.name = "rcar-pcie-ep",
		.of_match_table = rcar_pcie_ep_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_ep_probe,
};
builtin_platform_driver(rcar_pcie_ep_driver);

source code of linux/drivers/pci/controller/pcie-rcar-ep.c