1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de> |
4 | * Copyright (C) 2015 Broadcom Corporation |
5 | */ |
6 | |
7 | #include <linux/kernel.h> |
8 | #include <linux/pci.h> |
9 | #include <linux/pci-ecam.h> |
10 | #include <linux/msi.h> |
11 | #include <linux/clk.h> |
12 | #include <linux/module.h> |
13 | #include <linux/mbus.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/delay.h> |
16 | #include <linux/interrupt.h> |
17 | #include <linux/irqchip/arm-gic-v3.h> |
18 | #include <linux/platform_device.h> |
19 | #include <linux/of_address.h> |
20 | #include <linux/of_pci.h> |
21 | #include <linux/of_platform.h> |
22 | #include <linux/phy/phy.h> |
23 | |
24 | #include "pcie-iproc.h" |
25 | |
26 | #define EP_PERST_SOURCE_SELECT_SHIFT 2 |
27 | #define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) |
28 | #define EP_MODE_SURVIVE_PERST_SHIFT 1 |
29 | #define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) |
30 | #define RC_PCIE_RST_OUTPUT_SHIFT 0 |
31 | #define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) |
32 | #define PAXC_RESET_MASK 0x7f |
33 | |
34 | #define GIC_V3_CFG_SHIFT 0 |
35 | #define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) |
36 | |
37 | #define MSI_ENABLE_CFG_SHIFT 0 |
38 | #define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) |
39 | |
40 | #define CFG_IND_ADDR_MASK 0x00001ffc |
41 | |
42 | #define CFG_ADDR_REG_NUM_MASK 0x00000ffc |
43 | #define CFG_ADDR_CFG_TYPE_1 1 |
44 | |
45 | #define SYS_RC_INTX_MASK 0xf |
46 | |
47 | #define PCIE_PHYLINKUP_SHIFT 3 |
48 | #define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) |
49 | #define PCIE_DL_ACTIVE_SHIFT 2 |
50 | #define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) |
51 | |
52 | #define APB_ERR_EN_SHIFT 0 |
53 | #define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) |
54 | |
55 | #define CFG_RD_SUCCESS 0 |
56 | #define CFG_RD_UR 1 |
57 | #define CFG_RD_CRS 2 |
58 | #define CFG_RD_CA 3 |
59 | #define CFG_RETRY_STATUS 0xffff0001 |
60 | #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ |
61 | |
62 | /* derive the enum index of the outbound/inbound mapping registers */ |
63 | #define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) |
64 | |
65 | /* |
66 | * Maximum number of outbound mapping window sizes that can be supported by any |
67 | * OARR/OMAP mapping pair |
68 | */ |
69 | #define MAX_NUM_OB_WINDOW_SIZES 4 |
70 | |
71 | #define OARR_VALID_SHIFT 0 |
72 | #define OARR_VALID BIT(OARR_VALID_SHIFT) |
73 | #define OARR_SIZE_CFG_SHIFT 1 |
74 | |
75 | /* |
76 | * Maximum number of inbound mapping region sizes that can be supported by an |
77 | * IARR |
78 | */ |
79 | #define MAX_NUM_IB_REGION_SIZES 9 |
80 | |
81 | #define IMAP_VALID_SHIFT 0 |
82 | #define IMAP_VALID BIT(IMAP_VALID_SHIFT) |
83 | |
84 | #define IPROC_PCI_PM_CAP 0x48 |
85 | #define IPROC_PCI_PM_CAP_MASK 0xffff |
86 | #define IPROC_PCI_EXP_CAP 0xac |
87 | |
88 | #define IPROC_PCIE_REG_INVALID 0xffff |
89 | |
90 | /** |
91 | * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific |
92 | * parameters |
93 | * @window_sizes: list of supported outbound mapping window sizes in MB |
94 | * @nr_sizes: number of supported outbound mapping window sizes |
95 | */ |
96 | struct iproc_pcie_ob_map { |
97 | resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES]; |
98 | unsigned int nr_sizes; |
99 | }; |
100 | |
101 | static const struct iproc_pcie_ob_map paxb_ob_map[] = { |
102 | { |
103 | /* OARR0/OMAP0 */ |
104 | .window_sizes = { 128, 256 }, |
105 | .nr_sizes = 2, |
106 | }, |
107 | { |
108 | /* OARR1/OMAP1 */ |
109 | .window_sizes = { 128, 256 }, |
110 | .nr_sizes = 2, |
111 | }, |
112 | }; |
113 | |
114 | static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = { |
115 | { |
116 | /* OARR0/OMAP0 */ |
117 | .window_sizes = { 128, 256 }, |
118 | .nr_sizes = 2, |
119 | }, |
120 | { |
121 | /* OARR1/OMAP1 */ |
122 | .window_sizes = { 128, 256 }, |
123 | .nr_sizes = 2, |
124 | }, |
125 | { |
126 | /* OARR2/OMAP2 */ |
127 | .window_sizes = { 128, 256, 512, 1024 }, |
128 | .nr_sizes = 4, |
129 | }, |
130 | { |
131 | /* OARR3/OMAP3 */ |
132 | .window_sizes = { 128, 256, 512, 1024 }, |
133 | .nr_sizes = 4, |
134 | }, |
135 | }; |
136 | |
137 | /** |
138 | * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type |
139 | * @IPROC_PCIE_IB_MAP_MEM: DDR memory |
140 | * @IPROC_PCIE_IB_MAP_IO: device I/O memory |
141 | * @IPROC_PCIE_IB_MAP_INVALID: invalid or unused |
142 | */ |
143 | enum iproc_pcie_ib_map_type { |
144 | IPROC_PCIE_IB_MAP_MEM = 0, |
145 | IPROC_PCIE_IB_MAP_IO, |
146 | IPROC_PCIE_IB_MAP_INVALID |
147 | }; |
148 | |
149 | /** |
150 | * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific |
151 | * parameters |
152 | * @type: inbound mapping region type |
153 | * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or |
154 | * SZ_1G |
155 | * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or |
156 | * GB, depending on the size unit |
157 | * @nr_sizes: number of supported inbound mapping region sizes |
158 | * @nr_windows: number of supported inbound mapping windows for the region |
159 | * @imap_addr_offset: register offset between the upper and lower 32-bit |
160 | * IMAP address registers |
161 | * @imap_window_offset: register offset between each IMAP window |
162 | */ |
163 | struct iproc_pcie_ib_map { |
164 | enum iproc_pcie_ib_map_type type; |
165 | unsigned int size_unit; |
166 | resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES]; |
167 | unsigned int nr_sizes; |
168 | unsigned int nr_windows; |
169 | u16 imap_addr_offset; |
170 | u16 imap_window_offset; |
171 | }; |
172 | |
/*
 * Inbound mapping capabilities of the PAXB v2 controller, one entry per
 * IARR/IMAP pair.  Region sizes are expressed in units of @size_unit.
 */
static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
	{
		/* IARR0/IMAP0 */
		.type = IPROC_PCIE_IB_MAP_IO,
		.size_unit = SZ_1K,
		.region_sizes = { 32 },
		.nr_sizes = 1,
		.nr_windows = 8,
		.imap_addr_offset = 0x40,
		.imap_window_offset = 0x4,
	},
	{
		/* IARR1/IMAP1 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1M,
		.region_sizes = { 8 },
		.nr_sizes = 1,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,

	},
	{
		/* IARR2/IMAP2 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1M,
		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
				  16384 },
		.nr_sizes = 9,
		.nr_windows = 1,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR3/IMAP3 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 1, 2, 4, 8, 16, 32 },
		.nr_sizes = 6,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR4/IMAP4 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 32, 64, 128, 256, 512 },
		.nr_sizes = 5,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
};
227 | |
228 | /* |
229 | * iProc PCIe host registers |
230 | */ |
231 | enum iproc_pcie_reg { |
232 | /* clock/reset signal control */ |
233 | IPROC_PCIE_CLK_CTRL = 0, |
234 | |
235 | /* |
236 | * To allow MSI to be steered to an external MSI controller (e.g., ARM |
237 | * GICv3 ITS) |
238 | */ |
239 | IPROC_PCIE_MSI_GIC_MODE, |
240 | |
241 | /* |
242 | * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the |
243 | * window where the MSI posted writes are written, for the writes to be |
244 | * interpreted as MSI writes. |
245 | */ |
246 | IPROC_PCIE_MSI_BASE_ADDR, |
247 | IPROC_PCIE_MSI_WINDOW_SIZE, |
248 | |
249 | /* |
250 | * To hold the address of the register where the MSI writes are |
251 | * programmed. When ARM GICv3 ITS is used, this should be programmed |
252 | * with the address of the GITS_TRANSLATER register. |
253 | */ |
254 | IPROC_PCIE_MSI_ADDR_LO, |
255 | IPROC_PCIE_MSI_ADDR_HI, |
256 | |
257 | /* enable MSI */ |
258 | IPROC_PCIE_MSI_EN_CFG, |
259 | |
260 | /* allow access to root complex configuration space */ |
261 | IPROC_PCIE_CFG_IND_ADDR, |
262 | IPROC_PCIE_CFG_IND_DATA, |
263 | |
264 | /* allow access to device configuration space */ |
265 | IPROC_PCIE_CFG_ADDR, |
266 | IPROC_PCIE_CFG_DATA, |
267 | |
268 | /* enable INTx */ |
269 | IPROC_PCIE_INTX_EN, |
270 | |
271 | /* outbound address mapping */ |
272 | IPROC_PCIE_OARR0, |
273 | IPROC_PCIE_OMAP0, |
274 | IPROC_PCIE_OARR1, |
275 | IPROC_PCIE_OMAP1, |
276 | IPROC_PCIE_OARR2, |
277 | IPROC_PCIE_OMAP2, |
278 | IPROC_PCIE_OARR3, |
279 | IPROC_PCIE_OMAP3, |
280 | |
281 | /* inbound address mapping */ |
282 | IPROC_PCIE_IARR0, |
283 | IPROC_PCIE_IMAP0, |
284 | IPROC_PCIE_IARR1, |
285 | IPROC_PCIE_IMAP1, |
286 | IPROC_PCIE_IARR2, |
287 | IPROC_PCIE_IMAP2, |
288 | IPROC_PCIE_IARR3, |
289 | IPROC_PCIE_IMAP3, |
290 | IPROC_PCIE_IARR4, |
291 | IPROC_PCIE_IMAP4, |
292 | |
293 | /* config read status */ |
294 | IPROC_PCIE_CFG_RD_STATUS, |
295 | |
296 | /* link status */ |
297 | IPROC_PCIE_LINK_STATUS, |
298 | |
299 | /* enable APB error for unsupported requests */ |
300 | IPROC_PCIE_APB_ERR_EN, |
301 | |
302 | /* total number of core registers */ |
303 | IPROC_PCIE_MAX_NUM_REG, |
304 | }; |
305 | |
/*
 * Per-variant register offset tables, indexed by enum iproc_pcie_reg.
 *
 * NOTE(review): entries not listed in a designated initializer default to 0,
 * which is NOT the IPROC_PCIE_REG_INVALID (0xffff) sentinel used by
 * iproc_pcie_reg_is_invalid().  Presumably unsupported entries are remapped
 * to the invalid sentinel elsewhere (not visible in this file) -- confirm
 * against the code that installs pcie->reg_offsets.
 */

/* iProc PCIe PAXB BCMA registers */
static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
};

/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_OARR0]		= 0xd20,
	[IPROC_PCIE_OMAP0]		= 0xd40,
	[IPROC_PCIE_OARR1]		= 0xd28,
	[IPROC_PCIE_OMAP1]		= 0xd48,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
};

/* iProc PCIe PAXB v2 registers */
static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
	[IPROC_PCIE_INTX_EN]		= 0x330,
	[IPROC_PCIE_OARR0]		= 0xd20,
	[IPROC_PCIE_OMAP0]		= 0xd40,
	[IPROC_PCIE_OARR1]		= 0xd28,
	[IPROC_PCIE_OMAP1]		= 0xd48,
	[IPROC_PCIE_OARR2]		= 0xd60,
	[IPROC_PCIE_OMAP2]		= 0xd68,
	[IPROC_PCIE_OARR3]		= 0xdf0,
	[IPROC_PCIE_OMAP3]		= 0xdf8,
	[IPROC_PCIE_IARR0]		= 0xd00,
	[IPROC_PCIE_IMAP0]		= 0xc00,
	[IPROC_PCIE_IARR1]		= 0xd08,
	[IPROC_PCIE_IMAP1]		= 0xd70,
	[IPROC_PCIE_IARR2]		= 0xd10,
	[IPROC_PCIE_IMAP2]		= 0xcc0,
	[IPROC_PCIE_IARR3]		= 0xe00,
	[IPROC_PCIE_IMAP3]		= 0xe08,
	[IPROC_PCIE_IARR4]		= 0xe68,
	[IPROC_PCIE_IMAP4]		= 0xe70,
	[IPROC_PCIE_CFG_RD_STATUS]	= 0xee0,
	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
};

/* iProc PCIe PAXC v1 registers */
static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_CLK_CTRL]		= 0x000,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
};

/* iProc PCIe PAXC v2 registers */
static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
	[IPROC_PCIE_MSI_GIC_MODE]	= 0x050,
	[IPROC_PCIE_MSI_BASE_ADDR]	= 0x074,
	[IPROC_PCIE_MSI_WINDOW_SIZE]	= 0x078,
	[IPROC_PCIE_MSI_ADDR_LO]	= 0x07c,
	[IPROC_PCIE_MSI_ADDR_HI]	= 0x080,
	[IPROC_PCIE_MSI_EN_CFG]		= 0x09c,
	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
};
386 | |
387 | /* |
388 | * List of device IDs of controllers that have corrupted capability list that |
389 | * require SW fixup |
390 | */ |
391 | static const u16 iproc_pcie_corrupt_cap_did[] = { |
392 | 0x16cd, |
393 | 0x16f0, |
394 | 0xd802, |
395 | 0xd804 |
396 | }; |
397 | |
398 | static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) |
399 | { |
400 | struct iproc_pcie *pcie = bus->sysdata; |
401 | return pcie; |
402 | } |
403 | |
404 | static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset) |
405 | { |
406 | return !!(reg_offset == IPROC_PCIE_REG_INVALID); |
407 | } |
408 | |
/* Translate an abstract register id into this variant's MMIO offset. */
static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg)
{
	return pcie->reg_offsets[reg];
}
414 | |
415 | static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, |
416 | enum iproc_pcie_reg reg) |
417 | { |
418 | u16 offset = iproc_pcie_reg_offset(pcie, reg); |
419 | |
420 | if (iproc_pcie_reg_is_invalid(reg_offset: offset)) |
421 | return 0; |
422 | |
423 | return readl(addr: pcie->base + offset); |
424 | } |
425 | |
426 | static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, |
427 | enum iproc_pcie_reg reg, u32 val) |
428 | { |
429 | u16 offset = iproc_pcie_reg_offset(pcie, reg); |
430 | |
431 | if (iproc_pcie_reg_is_invalid(reg_offset: offset)) |
432 | return; |
433 | |
434 | writel(val, addr: pcie->base + offset); |
435 | } |
436 | |
437 | /* |
438 | * APB error forwarding can be disabled during access of configuration |
439 | * registers of the endpoint device, to prevent unsupported requests |
440 | * (typically seen during enumeration with multi-function devices) from |
441 | * triggering a system exception. |
442 | */ |
443 | static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, |
444 | bool disable) |
445 | { |
446 | struct iproc_pcie *pcie = iproc_data(bus); |
447 | u32 val; |
448 | |
449 | if (bus->number && pcie->has_apb_err_disable) { |
450 | val = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_APB_ERR_EN); |
451 | if (disable) |
452 | val &= ~APB_ERR_EN; |
453 | else |
454 | val |= APB_ERR_EN; |
455 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_APB_ERR_EN, val); |
456 | } |
457 | } |
458 | |
459 | static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, |
460 | unsigned int busno, |
461 | unsigned int devfn, |
462 | int where) |
463 | { |
464 | u16 offset; |
465 | u32 val; |
466 | |
467 | /* EP device access */ |
468 | val = ALIGN_DOWN(PCIE_ECAM_OFFSET(busno, devfn, where), 4) | |
469 | CFG_ADDR_CFG_TYPE_1; |
470 | |
471 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_CFG_ADDR, val); |
472 | offset = iproc_pcie_reg_offset(pcie, reg: IPROC_PCIE_CFG_DATA); |
473 | |
474 | if (iproc_pcie_reg_is_invalid(reg_offset: offset)) |
475 | return NULL; |
476 | |
477 | return (pcie->base + offset); |
478 | } |
479 | |
480 | static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie, |
481 | void __iomem *cfg_data_p) |
482 | { |
483 | int timeout = CFG_RETRY_STATUS_TIMEOUT_US; |
484 | unsigned int data; |
485 | u32 status; |
486 | |
487 | /* |
488 | * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only |
489 | * affects config reads of the Vendor ID. For config writes or any |
490 | * other config reads, the Root may automatically reissue the |
491 | * configuration request again as a new request. |
492 | * |
493 | * For config reads, this hardware returns CFG_RETRY_STATUS data |
494 | * when it receives a CRS completion, regardless of the address of |
495 | * the read or the CRS Software Visibility Enable bit. As a |
496 | * partial workaround for this, we retry in software any read that |
497 | * returns CFG_RETRY_STATUS. |
498 | * |
499 | * Note that a non-Vendor ID config register may have a value of |
500 | * CFG_RETRY_STATUS. If we read that, we can't distinguish it from |
501 | * a CRS completion, so we will incorrectly retry the read and |
502 | * eventually return the wrong data (0xffffffff). |
503 | */ |
504 | data = readl(addr: cfg_data_p); |
505 | while (data == CFG_RETRY_STATUS && timeout--) { |
506 | /* |
507 | * CRS state is set in CFG_RD status register |
508 | * This will handle the case where CFG_RETRY_STATUS is |
509 | * valid config data. |
510 | */ |
511 | status = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_CFG_RD_STATUS); |
512 | if (status != CFG_RD_CRS) |
513 | return data; |
514 | |
515 | udelay(1); |
516 | data = readl(addr: cfg_data_p); |
517 | } |
518 | |
519 | if (data == CFG_RETRY_STATUS) |
520 | data = 0xffffffff; |
521 | |
522 | return data; |
523 | } |
524 | |
525 | static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val) |
526 | { |
527 | u32 i, dev_id; |
528 | |
529 | switch (where & ~0x3) { |
530 | case PCI_VENDOR_ID: |
531 | dev_id = *val >> 16; |
532 | |
533 | /* |
534 | * Activate fixup for those controllers that have corrupted |
535 | * capability list registers |
536 | */ |
537 | for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++) |
538 | if (dev_id == iproc_pcie_corrupt_cap_did[i]) |
539 | pcie->fix_paxc_cap = true; |
540 | break; |
541 | |
542 | case IPROC_PCI_PM_CAP: |
543 | if (pcie->fix_paxc_cap) { |
544 | /* advertise PM, force next capability to PCIe */ |
545 | *val &= ~IPROC_PCI_PM_CAP_MASK; |
546 | *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM; |
547 | } |
548 | break; |
549 | |
550 | case IPROC_PCI_EXP_CAP: |
551 | if (pcie->fix_paxc_cap) { |
552 | /* advertise root port, version 2, terminate here */ |
553 | *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 | |
554 | PCI_CAP_ID_EXP; |
555 | } |
556 | break; |
557 | |
558 | case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL: |
559 | /* Don't advertise CRS SV support */ |
560 | *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); |
561 | break; |
562 | |
563 | default: |
564 | break; |
565 | } |
566 | } |
567 | |
/*
 * Config-space read with iProc-specific handling: root-complex (bus 0)
 * reads go through the generic 32-bit accessor plus the capability fixup;
 * endpoint reads go through the CFG_ADDR/CFG_DATA window with CRS retry
 * and a check that rejects unconfigured PFs.
 */
static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	unsigned int busno = bus->number;
	void __iomem *cfg_data_p;
	unsigned int data;
	int ret;

	/* root complex access */
	if (busno == 0) {
		ret = pci_generic_config_read32(bus, devfn, where, size, val);
		if (ret == PCIBIOS_SUCCESSFUL)
			iproc_pcie_fix_cap(pcie, where, val);

		return ret;
	}

	cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);

	if (!cfg_data_p)
		return PCIBIOS_DEVICE_NOT_FOUND;

	data = iproc_pcie_cfg_retry(pcie, cfg_data_p);

	*val = data;
	/* extract the requested sub-word from the 32-bit read */
	if (size <= 2)
		*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	/*
	 * For PAXC and PAXCv2, the total number of PFs that one can enumerate
	 * depends on the firmware configuration. Unfortunately, due to an ASIC
	 * bug, unconfigured PFs cannot be properly hidden from the root
	 * complex. As a result, write access to these PFs will cause bus lock
	 * up on the embedded processor
	 *
	 * Since all unconfigured PFs are left with an incorrect, staled device
	 * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those access
	 * early here and reject them all
	 */
#define DEVICE_ID_MASK     0xffff0000
#define DEVICE_ID_SHIFT    16
	if (pcie->rej_unconfig_pf &&
	    (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
		if ((*val & DEVICE_ID_MASK) ==
		    (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
			return PCIBIOS_FUNC_NOT_SUPPORTED;

	return PCIBIOS_SUCCESSFUL;
}
618 | |
619 | /* |
620 | * Note access to the configuration registers are protected at the higher layer |
621 | * by 'pci_lock' in drivers/pci/access.c |
622 | */ |
623 | static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, |
624 | int busno, unsigned int devfn, |
625 | int where) |
626 | { |
627 | u16 offset; |
628 | |
629 | /* root complex access */ |
630 | if (busno == 0) { |
631 | if (PCIE_ECAM_DEVFN(devfn) > 0) |
632 | return NULL; |
633 | |
634 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_CFG_IND_ADDR, |
635 | val: where & CFG_IND_ADDR_MASK); |
636 | offset = iproc_pcie_reg_offset(pcie, reg: IPROC_PCIE_CFG_IND_DATA); |
637 | if (iproc_pcie_reg_is_invalid(reg_offset: offset)) |
638 | return NULL; |
639 | else |
640 | return (pcie->base + offset); |
641 | } |
642 | |
643 | return iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where); |
644 | } |
645 | |
646 | static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, |
647 | unsigned int devfn, |
648 | int where) |
649 | { |
650 | return iproc_pcie_map_cfg_bus(pcie: iproc_data(bus), busno: bus->number, devfn, |
651 | where); |
652 | } |
653 | |
654 | static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, |
655 | unsigned int devfn, int where, |
656 | int size, u32 *val) |
657 | { |
658 | void __iomem *addr; |
659 | |
660 | addr = iproc_pcie_map_cfg_bus(pcie, busno: 0, devfn, where: where & ~0x3); |
661 | if (!addr) |
662 | return PCIBIOS_DEVICE_NOT_FOUND; |
663 | |
664 | *val = readl(addr); |
665 | |
666 | if (size <= 2) |
667 | *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); |
668 | |
669 | return PCIBIOS_SUCCESSFUL; |
670 | } |
671 | |
672 | static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, |
673 | unsigned int devfn, int where, |
674 | int size, u32 val) |
675 | { |
676 | void __iomem *addr; |
677 | u32 mask, tmp; |
678 | |
679 | addr = iproc_pcie_map_cfg_bus(pcie, busno: 0, devfn, where: where & ~0x3); |
680 | if (!addr) |
681 | return PCIBIOS_DEVICE_NOT_FOUND; |
682 | |
683 | if (size == 4) { |
684 | writel(val, addr); |
685 | return PCIBIOS_SUCCESSFUL; |
686 | } |
687 | |
688 | mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); |
689 | tmp = readl(addr) & mask; |
690 | tmp |= val << ((where & 0x3) * 8); |
691 | writel(val: tmp, addr); |
692 | |
693 | return PCIBIOS_SUCCESSFUL; |
694 | } |
695 | |
696 | static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, |
697 | int where, int size, u32 *val) |
698 | { |
699 | int ret; |
700 | struct iproc_pcie *pcie = iproc_data(bus); |
701 | |
702 | iproc_pcie_apb_err_disable(bus, disable: true); |
703 | if (pcie->iproc_cfg_read) |
704 | ret = iproc_pcie_config_read(bus, devfn, where, size, val); |
705 | else |
706 | ret = pci_generic_config_read32(bus, devfn, where, size, val); |
707 | iproc_pcie_apb_err_disable(bus, disable: false); |
708 | |
709 | return ret; |
710 | } |
711 | |
712 | static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn, |
713 | int where, int size, u32 val) |
714 | { |
715 | int ret; |
716 | |
717 | iproc_pcie_apb_err_disable(bus, disable: true); |
718 | ret = pci_generic_config_write32(bus, devfn, where, size, val); |
719 | iproc_pcie_apb_err_disable(bus, disable: false); |
720 | |
721 | return ret; |
722 | } |
723 | |
/* PCI config accessors installed for the iProc host bridge */
static struct pci_ops iproc_pcie_ops = {
	.map_bus = iproc_pcie_bus_map_cfg_bus,
	.read = iproc_pcie_config_read32,
	.write = iproc_pcie_config_write32,
};
729 | |
730 | static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert) |
731 | { |
732 | u32 val; |
733 | |
734 | /* |
735 | * PAXC and the internal emulated endpoint device downstream should not |
736 | * be reset. If firmware has been loaded on the endpoint device at an |
737 | * earlier boot stage, reset here causes issues. |
738 | */ |
739 | if (pcie->ep_is_internal) |
740 | return; |
741 | |
742 | if (assert) { |
743 | val = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_CLK_CTRL); |
744 | val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & |
745 | ~RC_PCIE_RST_OUTPUT; |
746 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_CLK_CTRL, val); |
747 | udelay(250); |
748 | } else { |
749 | val = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_CLK_CTRL); |
750 | val |= RC_PCIE_RST_OUTPUT; |
751 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_CLK_CTRL, val); |
752 | msleep(msecs: 100); |
753 | } |
754 | } |
755 | |
756 | int iproc_pcie_shutdown(struct iproc_pcie *pcie) |
757 | { |
758 | iproc_pcie_perst_ctrl(pcie, assert: true); |
759 | msleep(msecs: 500); |
760 | |
761 | return 0; |
762 | } |
763 | EXPORT_SYMBOL_GPL(iproc_pcie_shutdown); |
764 | |
765 | static int iproc_pcie_check_link(struct iproc_pcie *pcie) |
766 | { |
767 | struct device *dev = pcie->dev; |
768 | u32 hdr_type, link_ctrl, link_status, class, val; |
769 | bool link_is_active = false; |
770 | |
771 | /* |
772 | * PAXC connects to emulated endpoint devices directly and does not |
773 | * have a Serdes. Therefore skip the link detection logic here. |
774 | */ |
775 | if (pcie->ep_is_internal) |
776 | return 0; |
777 | |
778 | val = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_LINK_STATUS); |
779 | if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) { |
780 | dev_err(dev, "PHY or data link is INACTIVE!\n" ); |
781 | return -ENODEV; |
782 | } |
783 | |
784 | /* make sure we are not in EP mode */ |
785 | iproc_pci_raw_config_read32(pcie, devfn: 0, PCI_HEADER_TYPE, size: 1, val: &hdr_type); |
786 | if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE) { |
787 | dev_err(dev, "in EP mode, hdr=%#02x\n" , hdr_type); |
788 | return -EFAULT; |
789 | } |
790 | |
791 | /* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */ |
792 | #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c |
793 | #define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff |
794 | iproc_pci_raw_config_read32(pcie, devfn: 0, PCI_BRIDGE_CTRL_REG_OFFSET, |
795 | size: 4, val: &class); |
796 | class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK; |
797 | class |= PCI_CLASS_BRIDGE_PCI_NORMAL; |
798 | iproc_pci_raw_config_write32(pcie, devfn: 0, PCI_BRIDGE_CTRL_REG_OFFSET, |
799 | size: 4, val: class); |
800 | |
801 | /* check link status to see if link is active */ |
802 | iproc_pci_raw_config_read32(pcie, devfn: 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, |
803 | size: 2, val: &link_status); |
804 | if (link_status & PCI_EXP_LNKSTA_NLW) |
805 | link_is_active = true; |
806 | |
807 | if (!link_is_active) { |
808 | /* try GEN 1 link speed */ |
809 | #define PCI_TARGET_LINK_SPEED_MASK 0xf |
810 | #define PCI_TARGET_LINK_SPEED_GEN2 0x2 |
811 | #define PCI_TARGET_LINK_SPEED_GEN1 0x1 |
812 | iproc_pci_raw_config_read32(pcie, devfn: 0, |
813 | IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, |
814 | size: 4, val: &link_ctrl); |
815 | if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == |
816 | PCI_TARGET_LINK_SPEED_GEN2) { |
817 | link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; |
818 | link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; |
819 | iproc_pci_raw_config_write32(pcie, devfn: 0, |
820 | IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, |
821 | size: 4, val: link_ctrl); |
822 | msleep(msecs: 100); |
823 | |
824 | iproc_pci_raw_config_read32(pcie, devfn: 0, |
825 | IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, |
826 | size: 2, val: &link_status); |
827 | if (link_status & PCI_EXP_LNKSTA_NLW) |
828 | link_is_active = true; |
829 | } |
830 | } |
831 | |
832 | dev_info(dev, "link: %s\n" , link_is_active ? "UP" : "DOWN" ); |
833 | |
834 | return link_is_active ? 0 : -ENODEV; |
835 | } |
836 | |
837 | static void iproc_pcie_enable(struct iproc_pcie *pcie) |
838 | { |
839 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK); |
840 | } |
841 | |
842 | static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, |
843 | int window_idx) |
844 | { |
845 | u32 val; |
846 | |
847 | val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); |
848 | |
849 | return !!(val & OARR_VALID); |
850 | } |
851 | |
852 | static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, |
853 | int size_idx, u64 axi_addr, u64 pci_addr) |
854 | { |
855 | struct device *dev = pcie->dev; |
856 | u16 oarr_offset, omap_offset; |
857 | |
858 | /* |
859 | * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based |
860 | * on window index. |
861 | */ |
862 | oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0, |
863 | window_idx)); |
864 | omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0, |
865 | window_idx)); |
866 | if (iproc_pcie_reg_is_invalid(reg_offset: oarr_offset) || |
867 | iproc_pcie_reg_is_invalid(reg_offset: omap_offset)) |
868 | return -EINVAL; |
869 | |
870 | /* |
871 | * Program the OARR registers. The upper 32-bit OARR register is |
872 | * always right after the lower 32-bit OARR register. |
873 | */ |
874 | writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) | |
875 | OARR_VALID, addr: pcie->base + oarr_offset); |
876 | writel(upper_32_bits(axi_addr), addr: pcie->base + oarr_offset + 4); |
877 | |
878 | /* now program the OMAP registers */ |
879 | writel(lower_32_bits(pci_addr), addr: pcie->base + omap_offset); |
880 | writel(upper_32_bits(pci_addr), addr: pcie->base + omap_offset + 4); |
881 | |
882 | dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n" , |
883 | window_idx, oarr_offset, &axi_addr, &pci_addr); |
884 | dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n" , |
885 | readl(pcie->base + oarr_offset), |
886 | readl(pcie->base + oarr_offset + 4)); |
887 | dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n" , |
888 | readl(pcie->base + omap_offset), |
889 | readl(pcie->base + omap_offset + 4)); |
890 | |
891 | return 0; |
892 | } |
893 | |
894 | /* |
895 | * Some iProc SoCs require the SW to configure the outbound address mapping |
896 | * |
897 | * Outbound address translation: |
898 | * |
899 | * iproc_pcie_address = axi_address - axi_offset |
900 | * OARR = iproc_pcie_address |
901 | * OMAP = pci_addr |
902 | * |
903 | * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address |
904 | */ |
static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct iproc_pcie_ob *ob = &pcie->ob;
	struct device *dev = pcie->dev;
	int ret = -EINVAL, window_idx, size_idx;

	/* the AXI range to be mapped must lie above the controller offset */
	if (axi_addr < ob->axi_offset) {
		dev_err(dev, "axi address %pap less than offset %pap\n",
			&axi_addr, &ob->axi_offset);
		return -EINVAL;
	}

	/*
	 * Translate the AXI address to the internal address used by the iProc
	 * PCIe core before programming the OARR
	 */
	axi_addr -= ob->axi_offset;

	/* iterate through all OARR/OMAP mapping windows, highest index first */
	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
		const struct iproc_pcie_ob_map *ob_map =
			&pcie->ob_map[window_idx];

		/*
		 * If current outbound window is already in use, move on to the
		 * next one.
		 */
		if (iproc_pcie_ob_is_valid(pcie, window_idx))
			continue;

		/*
		 * Iterate through all supported window sizes within the
		 * OARR/OMAP pair to find a match. Go through the window sizes
		 * in a descending order.
		 */
		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
		     size_idx--) {
			/* window_sizes[] is in MB; scale to bytes */
			resource_size_t window_size =
				ob_map->window_sizes[size_idx] * SZ_1M;

			/*
			 * Keep iterating until we reach the last window and
			 * with the minimal window size at index zero. In this
			 * case, we take a compromise by mapping it using the
			 * minimum window size that can be supported
			 */
			if (size < window_size) {
				if (size_idx > 0 || window_idx > 0)
					continue;

				/*
				 * For the corner case of reaching the minimal
				 * window size that can be supported on the
				 * last window: round down to the window
				 * granularity and over-map the remainder.
				 */
				axi_addr = ALIGN_DOWN(axi_addr, window_size);
				pci_addr = ALIGN_DOWN(pci_addr, window_size);
				size = window_size;
			}

			/* both sides must be naturally aligned to the window */
			if (!IS_ALIGNED(axi_addr, window_size) ||
			    !IS_ALIGNED(pci_addr, window_size)) {
				dev_err(dev,
					"axi %pap or pci %pap not aligned\n",
					&axi_addr, &pci_addr);
				return -EINVAL;
			}

			/*
			 * Match found! Program both OARR and OMAP and mark
			 * them as a valid entry.
			 */
			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
						  axi_addr, pci_addr);
			if (ret)
				goto err_ob;

			size -= window_size;
			if (size == 0)
				return 0;

			/*
			 * If we are here, we are done with the current window,
			 * but not yet finished all mappings. Need to move on
			 * to the next window.
			 */
			axi_addr += window_size;
			pci_addr += window_size;
			break;
		}
	}

	/*
	 * Reached either by falling out of the loops (no free window or no
	 * size match; ret is still -EINVAL) or via goto on a write error.
	 */
err_ob:
	dev_err(dev, "unable to configure outbound mapping\n");
	dev_err(dev,
		"axi %pap, axi offset %pap, pci %pap, res size %pap\n",
		&axi_addr, &ob->axi_offset, &pci_addr, &size);

	return ret;
}
1006 | |
1007 | static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, |
1008 | struct list_head *resources) |
1009 | { |
1010 | struct device *dev = pcie->dev; |
1011 | struct resource_entry *window; |
1012 | int ret; |
1013 | |
1014 | resource_list_for_each_entry(window, resources) { |
1015 | struct resource *res = window->res; |
1016 | u64 res_type = resource_type(res); |
1017 | |
1018 | switch (res_type) { |
1019 | case IORESOURCE_IO: |
1020 | case IORESOURCE_BUS: |
1021 | break; |
1022 | case IORESOURCE_MEM: |
1023 | ret = iproc_pcie_setup_ob(pcie, axi_addr: res->start, |
1024 | pci_addr: res->start - window->offset, |
1025 | size: resource_size(res)); |
1026 | if (ret) |
1027 | return ret; |
1028 | break; |
1029 | default: |
1030 | dev_err(dev, "invalid resource %pR\n" , res); |
1031 | return -EINVAL; |
1032 | } |
1033 | } |
1034 | |
1035 | return 0; |
1036 | } |
1037 | |
1038 | static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, |
1039 | int region_idx) |
1040 | { |
1041 | const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; |
1042 | u32 val; |
1043 | |
1044 | val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); |
1045 | |
1046 | return !!(val & (BIT(ib_map->nr_sizes) - 1)); |
1047 | } |
1048 | |
1049 | static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, |
1050 | enum iproc_pcie_ib_map_type type) |
1051 | { |
1052 | return !!(ib_map->type == type); |
1053 | } |
1054 | |
1055 | static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, |
1056 | int size_idx, int nr_windows, u64 axi_addr, |
1057 | u64 pci_addr, resource_size_t size) |
1058 | { |
1059 | struct device *dev = pcie->dev; |
1060 | const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; |
1061 | u16 iarr_offset, imap_offset; |
1062 | u32 val; |
1063 | int window_idx; |
1064 | |
1065 | iarr_offset = iproc_pcie_reg_offset(pcie, |
1066 | MAP_REG(IPROC_PCIE_IARR0, region_idx)); |
1067 | imap_offset = iproc_pcie_reg_offset(pcie, |
1068 | MAP_REG(IPROC_PCIE_IMAP0, region_idx)); |
1069 | if (iproc_pcie_reg_is_invalid(reg_offset: iarr_offset) || |
1070 | iproc_pcie_reg_is_invalid(reg_offset: imap_offset)) |
1071 | return -EINVAL; |
1072 | |
1073 | dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n" , |
1074 | region_idx, iarr_offset, &axi_addr, &pci_addr); |
1075 | |
1076 | /* |
1077 | * Program the IARR registers. The upper 32-bit IARR register is |
1078 | * always right after the lower 32-bit IARR register. |
1079 | */ |
1080 | writel(lower_32_bits(pci_addr) | BIT(size_idx), |
1081 | addr: pcie->base + iarr_offset); |
1082 | writel(upper_32_bits(pci_addr), addr: pcie->base + iarr_offset + 4); |
1083 | |
1084 | dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n" , |
1085 | readl(pcie->base + iarr_offset), |
1086 | readl(pcie->base + iarr_offset + 4)); |
1087 | |
1088 | /* |
1089 | * Now program the IMAP registers. Each IARR region may have one or |
1090 | * more IMAP windows. |
1091 | */ |
1092 | size >>= ilog2(nr_windows); |
1093 | for (window_idx = 0; window_idx < nr_windows; window_idx++) { |
1094 | val = readl(addr: pcie->base + imap_offset); |
1095 | val |= lower_32_bits(axi_addr) | IMAP_VALID; |
1096 | writel(val, addr: pcie->base + imap_offset); |
1097 | writel(upper_32_bits(axi_addr), |
1098 | addr: pcie->base + imap_offset + ib_map->imap_addr_offset); |
1099 | |
1100 | dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n" , |
1101 | window_idx, readl(pcie->base + imap_offset), |
1102 | readl(pcie->base + imap_offset + |
1103 | ib_map->imap_addr_offset)); |
1104 | |
1105 | imap_offset += ib_map->imap_window_offset; |
1106 | axi_addr += size; |
1107 | } |
1108 | |
1109 | return 0; |
1110 | } |
1111 | |
1112 | static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, |
1113 | struct resource_entry *entry, |
1114 | enum iproc_pcie_ib_map_type type) |
1115 | { |
1116 | struct device *dev = pcie->dev; |
1117 | struct iproc_pcie_ib *ib = &pcie->ib; |
1118 | int ret; |
1119 | unsigned int region_idx, size_idx; |
1120 | u64 axi_addr = entry->res->start; |
1121 | u64 pci_addr = entry->res->start - entry->offset; |
1122 | resource_size_t size = resource_size(res: entry->res); |
1123 | |
1124 | /* iterate through all IARR mapping regions */ |
1125 | for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { |
1126 | const struct iproc_pcie_ib_map *ib_map = |
1127 | &pcie->ib_map[region_idx]; |
1128 | |
1129 | /* |
1130 | * If current inbound region is already in use or not a |
1131 | * compatible type, move on to the next. |
1132 | */ |
1133 | if (iproc_pcie_ib_is_in_use(pcie, region_idx) || |
1134 | !iproc_pcie_ib_check_type(ib_map, type)) |
1135 | continue; |
1136 | |
1137 | /* iterate through all supported region sizes to find a match */ |
1138 | for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { |
1139 | resource_size_t region_size = |
1140 | ib_map->region_sizes[size_idx] * ib_map->size_unit; |
1141 | |
1142 | if (size != region_size) |
1143 | continue; |
1144 | |
1145 | if (!IS_ALIGNED(axi_addr, region_size) || |
1146 | !IS_ALIGNED(pci_addr, region_size)) { |
1147 | dev_err(dev, |
1148 | "axi %pap or pci %pap not aligned\n" , |
1149 | &axi_addr, &pci_addr); |
1150 | return -EINVAL; |
1151 | } |
1152 | |
1153 | /* Match found! Program IARR and all IMAP windows. */ |
1154 | ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, |
1155 | nr_windows: ib_map->nr_windows, axi_addr, |
1156 | pci_addr, size); |
1157 | if (ret) |
1158 | goto err_ib; |
1159 | else |
1160 | return 0; |
1161 | |
1162 | } |
1163 | } |
1164 | ret = -EINVAL; |
1165 | |
1166 | err_ib: |
1167 | dev_err(dev, "unable to configure inbound mapping\n" ); |
1168 | dev_err(dev, "axi %pap, pci %pap, res size %pap\n" , |
1169 | &axi_addr, &pci_addr, &size); |
1170 | |
1171 | return ret; |
1172 | } |
1173 | |
1174 | static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) |
1175 | { |
1176 | struct pci_host_bridge *host = pci_host_bridge_from_priv(priv: pcie); |
1177 | struct resource_entry *entry; |
1178 | int ret = 0; |
1179 | |
1180 | resource_list_for_each_entry(entry, &host->dma_ranges) { |
1181 | /* Each range entry corresponds to an inbound mapping region */ |
1182 | ret = iproc_pcie_setup_ib(pcie, entry, type: IPROC_PCIE_IB_MAP_MEM); |
1183 | if (ret) |
1184 | break; |
1185 | } |
1186 | |
1187 | return ret; |
1188 | } |
1189 | |
1190 | static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie) |
1191 | { |
1192 | struct iproc_pcie_ib *ib = &pcie->ib; |
1193 | struct iproc_pcie_ob *ob = &pcie->ob; |
1194 | int idx; |
1195 | |
1196 | if (pcie->ep_is_internal) |
1197 | return; |
1198 | |
1199 | if (pcie->need_ob_cfg) { |
1200 | /* iterate through all OARR mapping regions */ |
1201 | for (idx = ob->nr_windows - 1; idx >= 0; idx--) { |
1202 | iproc_pcie_write_reg(pcie, |
1203 | MAP_REG(IPROC_PCIE_OARR0, idx), val: 0); |
1204 | } |
1205 | } |
1206 | |
1207 | if (pcie->need_ib_cfg) { |
1208 | /* iterate through all IARR mapping regions */ |
1209 | for (idx = 0; idx < ib->nr_regions; idx++) { |
1210 | iproc_pcie_write_reg(pcie, |
1211 | MAP_REG(IPROC_PCIE_IARR0, idx), val: 0); |
1212 | } |
1213 | } |
1214 | } |
1215 | |
1216 | static int iproce_pcie_get_msi(struct iproc_pcie *pcie, |
1217 | struct device_node *msi_node, |
1218 | u64 *msi_addr) |
1219 | { |
1220 | struct device *dev = pcie->dev; |
1221 | int ret; |
1222 | struct resource res; |
1223 | |
1224 | /* |
1225 | * Check if 'msi-map' points to ARM GICv3 ITS, which is the only |
1226 | * supported external MSI controller that requires steering. |
1227 | */ |
1228 | if (!of_device_is_compatible(device: msi_node, "arm,gic-v3-its" )) { |
1229 | dev_err(dev, "unable to find compatible MSI controller\n" ); |
1230 | return -ENODEV; |
1231 | } |
1232 | |
1233 | /* derive GITS_TRANSLATER address from GICv3 */ |
1234 | ret = of_address_to_resource(dev: msi_node, index: 0, r: &res); |
1235 | if (ret < 0) { |
1236 | dev_err(dev, "unable to obtain MSI controller resources\n" ); |
1237 | return ret; |
1238 | } |
1239 | |
1240 | *msi_addr = res.start + GITS_TRANSLATER; |
1241 | return 0; |
1242 | } |
1243 | |
1244 | static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) |
1245 | { |
1246 | int ret; |
1247 | struct resource_entry entry; |
1248 | |
1249 | memset(&entry, 0, sizeof(entry)); |
1250 | entry.res = &entry.__res; |
1251 | |
1252 | msi_addr &= ~(SZ_32K - 1); |
1253 | entry.res->start = msi_addr; |
1254 | entry.res->end = msi_addr + SZ_32K - 1; |
1255 | |
1256 | ret = iproc_pcie_setup_ib(pcie, entry: &entry, type: IPROC_PCIE_IB_MAP_IO); |
1257 | return ret; |
1258 | } |
1259 | |
1260 | static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr, |
1261 | bool enable) |
1262 | { |
1263 | u32 val; |
1264 | |
1265 | if (!enable) { |
1266 | /* |
1267 | * Disable PAXC MSI steering. All write transfers will be |
1268 | * treated as non-MSI transfers |
1269 | */ |
1270 | val = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_MSI_EN_CFG); |
1271 | val &= ~MSI_ENABLE_CFG; |
1272 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_MSI_EN_CFG, val); |
1273 | return; |
1274 | } |
1275 | |
1276 | /* |
1277 | * Program bits [43:13] of address of GITS_TRANSLATER register into |
1278 | * bits [30:0] of the MSI base address register. In fact, in all iProc |
1279 | * based SoCs, all I/O register bases are well below the 32-bit |
1280 | * boundary, so we can safely assume bits [43:32] are always zeros. |
1281 | */ |
1282 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_MSI_BASE_ADDR, |
1283 | val: (u32)(msi_addr >> 13)); |
1284 | |
1285 | /* use a default 8K window size */ |
1286 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_MSI_WINDOW_SIZE, val: 0); |
1287 | |
1288 | /* steering MSI to GICv3 ITS */ |
1289 | val = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_MSI_GIC_MODE); |
1290 | val |= GIC_V3_CFG; |
1291 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_MSI_GIC_MODE, val); |
1292 | |
1293 | /* |
1294 | * Program bits [43:2] of address of GITS_TRANSLATER register into the |
1295 | * iProc MSI address registers. |
1296 | */ |
1297 | msi_addr >>= 2; |
1298 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_MSI_ADDR_HI, |
1299 | upper_32_bits(msi_addr)); |
1300 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_MSI_ADDR_LO, |
1301 | lower_32_bits(msi_addr)); |
1302 | |
1303 | /* enable MSI */ |
1304 | val = iproc_pcie_read_reg(pcie, reg: IPROC_PCIE_MSI_EN_CFG); |
1305 | val |= MSI_ENABLE_CFG; |
1306 | iproc_pcie_write_reg(pcie, reg: IPROC_PCIE_MSI_EN_CFG, val); |
1307 | } |
1308 | |
1309 | static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, |
1310 | struct device_node *msi_node) |
1311 | { |
1312 | struct device *dev = pcie->dev; |
1313 | int ret; |
1314 | u64 msi_addr; |
1315 | |
1316 | ret = iproce_pcie_get_msi(pcie, msi_node, msi_addr: &msi_addr); |
1317 | if (ret < 0) { |
1318 | dev_err(dev, "msi steering failed\n" ); |
1319 | return ret; |
1320 | } |
1321 | |
1322 | switch (pcie->type) { |
1323 | case IPROC_PCIE_PAXB_V2: |
1324 | ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr); |
1325 | if (ret) |
1326 | return ret; |
1327 | break; |
1328 | case IPROC_PCIE_PAXC_V2: |
1329 | iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, enable: true); |
1330 | break; |
1331 | default: |
1332 | return -EINVAL; |
1333 | } |
1334 | |
1335 | return 0; |
1336 | } |
1337 | |
1338 | static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) |
1339 | { |
1340 | struct device_node *msi_node; |
1341 | int ret; |
1342 | |
1343 | /* |
1344 | * Either the "msi-parent" or the "msi-map" phandle needs to exist |
1345 | * for us to obtain the MSI node. |
1346 | */ |
1347 | |
1348 | msi_node = of_parse_phandle(np: pcie->dev->of_node, phandle_name: "msi-parent" , index: 0); |
1349 | if (!msi_node) { |
1350 | const __be32 *msi_map = NULL; |
1351 | int len; |
1352 | u32 phandle; |
1353 | |
1354 | msi_map = of_get_property(node: pcie->dev->of_node, name: "msi-map" , lenp: &len); |
1355 | if (!msi_map) |
1356 | return -ENODEV; |
1357 | |
1358 | phandle = be32_to_cpup(p: msi_map + 1); |
1359 | msi_node = of_find_node_by_phandle(handle: phandle); |
1360 | if (!msi_node) |
1361 | return -ENODEV; |
1362 | } |
1363 | |
1364 | /* |
1365 | * Certain revisions of the iProc PCIe controller require additional |
1366 | * configurations to steer the MSI writes towards an external MSI |
1367 | * controller. |
1368 | */ |
1369 | if (pcie->need_msi_steer) { |
1370 | ret = iproc_pcie_msi_steer(pcie, msi_node); |
1371 | if (ret) |
1372 | goto out_put_node; |
1373 | } |
1374 | |
1375 | /* |
1376 | * If another MSI controller is being used, the call below should fail |
1377 | * but that is okay |
1378 | */ |
1379 | ret = iproc_msi_init(pcie, node: msi_node); |
1380 | |
1381 | out_put_node: |
1382 | of_node_put(node: msi_node); |
1383 | return ret; |
1384 | } |
1385 | |
/* Tear down the iProc MSI support set up by iproc_pcie_msi_enable(). */
static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
{
	iproc_msi_exit(pcie);
}
1390 | |
1391 | static int iproc_pcie_rev_init(struct iproc_pcie *pcie) |
1392 | { |
1393 | struct device *dev = pcie->dev; |
1394 | unsigned int reg_idx; |
1395 | const u16 *regs; |
1396 | |
1397 | switch (pcie->type) { |
1398 | case IPROC_PCIE_PAXB_BCMA: |
1399 | regs = iproc_pcie_reg_paxb_bcma; |
1400 | break; |
1401 | case IPROC_PCIE_PAXB: |
1402 | regs = iproc_pcie_reg_paxb; |
1403 | pcie->has_apb_err_disable = true; |
1404 | if (pcie->need_ob_cfg) { |
1405 | pcie->ob_map = paxb_ob_map; |
1406 | pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map); |
1407 | } |
1408 | break; |
1409 | case IPROC_PCIE_PAXB_V2: |
1410 | regs = iproc_pcie_reg_paxb_v2; |
1411 | pcie->iproc_cfg_read = true; |
1412 | pcie->has_apb_err_disable = true; |
1413 | if (pcie->need_ob_cfg) { |
1414 | pcie->ob_map = paxb_v2_ob_map; |
1415 | pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map); |
1416 | } |
1417 | pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); |
1418 | pcie->ib_map = paxb_v2_ib_map; |
1419 | pcie->need_msi_steer = true; |
1420 | dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n" , |
1421 | CFG_RETRY_STATUS); |
1422 | break; |
1423 | case IPROC_PCIE_PAXC: |
1424 | regs = iproc_pcie_reg_paxc; |
1425 | pcie->ep_is_internal = true; |
1426 | pcie->iproc_cfg_read = true; |
1427 | pcie->rej_unconfig_pf = true; |
1428 | break; |
1429 | case IPROC_PCIE_PAXC_V2: |
1430 | regs = iproc_pcie_reg_paxc_v2; |
1431 | pcie->ep_is_internal = true; |
1432 | pcie->iproc_cfg_read = true; |
1433 | pcie->rej_unconfig_pf = true; |
1434 | pcie->need_msi_steer = true; |
1435 | break; |
1436 | default: |
1437 | dev_err(dev, "incompatible iProc PCIe interface\n" ); |
1438 | return -EINVAL; |
1439 | } |
1440 | |
1441 | pcie->reg_offsets = devm_kcalloc(dev, n: IPROC_PCIE_MAX_NUM_REG, |
1442 | size: sizeof(*pcie->reg_offsets), |
1443 | GFP_KERNEL); |
1444 | if (!pcie->reg_offsets) |
1445 | return -ENOMEM; |
1446 | |
1447 | /* go through the register table and populate all valid registers */ |
1448 | pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ? |
1449 | IPROC_PCIE_REG_INVALID : regs[0]; |
1450 | for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++) |
1451 | pcie->reg_offsets[reg_idx] = regs[reg_idx] ? |
1452 | regs[reg_idx] : IPROC_PCIE_REG_INVALID; |
1453 | |
1454 | return 0; |
1455 | } |
1456 | |
1457 | int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) |
1458 | { |
1459 | struct device *dev; |
1460 | int ret; |
1461 | struct pci_dev *pdev; |
1462 | struct pci_host_bridge *host = pci_host_bridge_from_priv(priv: pcie); |
1463 | |
1464 | dev = pcie->dev; |
1465 | |
1466 | ret = iproc_pcie_rev_init(pcie); |
1467 | if (ret) { |
1468 | dev_err(dev, "unable to initialize controller parameters\n" ); |
1469 | return ret; |
1470 | } |
1471 | |
1472 | ret = phy_init(phy: pcie->phy); |
1473 | if (ret) { |
1474 | dev_err(dev, "unable to initialize PCIe PHY\n" ); |
1475 | return ret; |
1476 | } |
1477 | |
1478 | ret = phy_power_on(phy: pcie->phy); |
1479 | if (ret) { |
1480 | dev_err(dev, "unable to power on PCIe PHY\n" ); |
1481 | goto err_exit_phy; |
1482 | } |
1483 | |
1484 | iproc_pcie_perst_ctrl(pcie, assert: true); |
1485 | iproc_pcie_perst_ctrl(pcie, assert: false); |
1486 | |
1487 | iproc_pcie_invalidate_mapping(pcie); |
1488 | |
1489 | if (pcie->need_ob_cfg) { |
1490 | ret = iproc_pcie_map_ranges(pcie, resources: res); |
1491 | if (ret) { |
1492 | dev_err(dev, "map failed\n" ); |
1493 | goto err_power_off_phy; |
1494 | } |
1495 | } |
1496 | |
1497 | if (pcie->need_ib_cfg) { |
1498 | ret = iproc_pcie_map_dma_ranges(pcie); |
1499 | if (ret && ret != -ENOENT) |
1500 | goto err_power_off_phy; |
1501 | } |
1502 | |
1503 | ret = iproc_pcie_check_link(pcie); |
1504 | if (ret) { |
1505 | dev_err(dev, "no PCIe EP device detected\n" ); |
1506 | goto err_power_off_phy; |
1507 | } |
1508 | |
1509 | iproc_pcie_enable(pcie); |
1510 | |
1511 | if (IS_ENABLED(CONFIG_PCI_MSI)) |
1512 | if (iproc_pcie_msi_enable(pcie)) |
1513 | dev_info(dev, "not using iProc MSI\n" ); |
1514 | |
1515 | host->ops = &iproc_pcie_ops; |
1516 | host->sysdata = pcie; |
1517 | host->map_irq = pcie->map_irq; |
1518 | |
1519 | ret = pci_host_probe(bridge: host); |
1520 | if (ret < 0) { |
1521 | dev_err(dev, "failed to scan host: %d\n" , ret); |
1522 | goto err_power_off_phy; |
1523 | } |
1524 | |
1525 | for_each_pci_bridge(pdev, host->bus) { |
1526 | if (pci_pcie_type(dev: pdev) == PCI_EXP_TYPE_ROOT_PORT) |
1527 | pcie_print_link_status(dev: pdev); |
1528 | } |
1529 | |
1530 | return 0; |
1531 | |
1532 | err_power_off_phy: |
1533 | phy_power_off(phy: pcie->phy); |
1534 | err_exit_phy: |
1535 | phy_exit(phy: pcie->phy); |
1536 | return ret; |
1537 | } |
1538 | EXPORT_SYMBOL(iproc_pcie_setup); |
1539 | |
1540 | void iproc_pcie_remove(struct iproc_pcie *pcie) |
1541 | { |
1542 | struct pci_host_bridge *host = pci_host_bridge_from_priv(priv: pcie); |
1543 | |
1544 | pci_stop_root_bus(bus: host->bus); |
1545 | pci_remove_root_bus(bus: host->bus); |
1546 | |
1547 | iproc_pcie_msi_disable(pcie); |
1548 | |
1549 | phy_power_off(phy: pcie->phy); |
1550 | phy_exit(phy: pcie->phy); |
1551 | } |
1552 | EXPORT_SYMBOL(iproc_pcie_remove); |
1553 | |
1554 | /* |
1555 | * The MSI parsing logic in certain revisions of Broadcom PAXC based root |
1556 | * complex does not work and needs to be disabled |
1557 | */ |
1558 | static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev) |
1559 | { |
1560 | struct iproc_pcie *pcie = iproc_data(bus: pdev->bus); |
1561 | |
1562 | if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) |
1563 | iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr: 0, enable: false); |
1564 | } |
1565 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, |
1566 | quirk_paxc_disable_msi_parsing); |
1567 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, |
1568 | quirk_paxc_disable_msi_parsing); |
1569 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, |
1570 | quirk_paxc_disable_msi_parsing); |
1571 | |
1572 | static void quirk_paxc_bridge(struct pci_dev *pdev) |
1573 | { |
1574 | /* |
1575 | * The PCI config space is shared with the PAXC root port and the first |
1576 | * Ethernet device. So, we need to workaround this by telling the PCI |
1577 | * code that the bridge is not an Ethernet device. |
1578 | */ |
1579 | if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) |
1580 | pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; |
1581 | |
1582 | /* |
1583 | * MPSS is not being set properly (as it is currently 0). This is |
1584 | * because that area of the PCI config space is hard coded to zero, and |
1585 | * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS) |
1586 | * so that the MPS can be set to the real max value. |
1587 | */ |
1588 | pdev->pcie_mpss = 2; |
1589 | } |
1590 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge); |
1591 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge); |
1592 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge); |
1593 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge); |
1594 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge); |
1595 | |
/* Module metadata */
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>" );
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver" );
MODULE_LICENSE("GPL v2" );
1599 | |