/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000

/*
 * Local Management Registers
 */
#define CDNS_PCIE_LM_BASE 0x00100000

/* Vendor ID Register */
#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)

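/*
 * Illustrative sketch, not part of the original header: a driver that
 * wants to override the reported vendor and subsystem vendor IDs would
 * typically combine the two field macros above and write the result to
 * CDNS_PCIE_LM_ID (the "pcie" and "vid" variables are assumed to be
 * provided by the caller):
 *
 *	u32 id = CDNS_PCIE_LM_ID_VENDOR(vid) | CDNS_PCIE_LM_ID_SUBSYS(vid);
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
 */
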
/* Root Port Requester ID Register */
#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
#define CDNS_PCIE_LM_RP_RID_SHIFT 0
#define CDNS_PCIE_LM_RP_RID_(rid) \
	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)

/* Endpoint Bus and Device Number Register */
#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8

/* Endpoint Function f BAR b Configuration Registers */
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
	(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
	(GENMASK(7, 5) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))

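/*
 * Illustrative sketch, not part of the original header: programming the
 * aperture and control fields of one endpoint BAR is a read-modify-write
 * of the matching configuration register.  Each register packs up to four
 * BARs (eight bits per BAR), so the field index "b" is the BAR's position
 * within that register; "pcie", "fn", "bar", "aperture" and "ctrl" are
 * assumed to be provided by the caller:
 *
 *	u32 reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
 *	int b = (bar < BAR_4) ? bar : bar - BAR_4;
 *	u32 cfg;
 *
 *	cfg = cdns_pcie_readl(pcie, reg);
 *	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
 *		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
 *	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
 *	       CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
 *	cdns_pcie_writel(pcie, reg, cfg);
 */
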
/* Endpoint Function Configuration Register */
#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)

/* Root Complex BAR Configuration Register */
#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)

/* BAR control values applicable to both Endpoint Function and Root Complex */
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7

#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
	(((aperture) - 2) << ((bar) * 8))

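/*
 * Illustrative sketch, not part of the original header: for the Root
 * Complex, a single 32-bit value in CDNS_PCIE_LM_RC_BAR_CFG describes both
 * inbound BARs.  Describing "bar" as a 64-bit prefetchable window of
 * "size" bytes (a power of two) could look roughly as below; a real driver
 * would also clear the BAR's previous control/aperture bits first, and
 * "pcie", "bar" and "size" are assumed to come from the caller:
 *
 *	u32 value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
 *
 *	value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
 *		 LM_RC_BAR_CFG_APERTURE(bar, ilog2(size));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
 */
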
/* PTM Control Register */
#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8)
#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)

/*
 * Endpoint Function Registers (PCI configuration space for endpoint functions)
 */
#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))

#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200

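/*
 * Illustrative sketch, not part of the original header: per-function
 * configuration space registers are reached by adding a capability offset
 * to the function's base.  For example, reading the MSI flags of function
 * "fn" (PCI_MSI_FLAGS comes from <uapi/linux/pci_regs.h>; "pcie" and "fn"
 * are assumed to be provided by the caller):
 *
 *	u16 flags = cdns_pcie_ep_fn_readw(pcie, fn,
 *					  CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET +
 *					  PCI_MSI_FLAGS);
 */
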
/*
 * Endpoint PF Registers
 */
#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000)
#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8)

/*
 * Root Port Registers (PCI configuration space for the root port function)
 */
#define CDNS_PCIE_RP_BASE 0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET 0xc0

/*
 * Address Translation Registers
 */
#define CDNS_PCIE_AT_BASE 0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)

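/*
 * Illustrative sketch, not part of the original header: the PCI-side
 * address of outbound region "r" is split across the two registers above,
 * with the number of passed-through address bits encoded in ADDR0.
 * Mapping a region of 2^nbits bytes at "pci_addr" could look roughly like
 * this ("pcie", "r", "pci_addr" and "nbits" are assumed to come from the
 * caller):
 *
 *	u32 addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
 *		    (lower_32_bits(pci_addr) & GENMASK(31, 8));
 *	u32 addr1 = upper_32_bits(pci_addr);
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
 */
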
/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)

/* LTSSM Capabilities register */
#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
	(((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)

enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};

#define CDNS_PCIE_RP_MAX_IB 0x3
#define CDNS_PCIE_MAX_OB 32

struct cdns_pcie_rp_ib_bar {
	u64 size;
	bool free;
};

/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)

/* Normal/Vendor specific message access: offset inside some outbound region */
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_DATA BIT(16)

struct cdns_pcie;

enum cdns_pcie_msg_routing {
	/* Route to Root Complex */
	MSG_ROUTING_TO_RC,

	/* Use Address Routing */
	MSG_ROUTING_BY_ADDR,

	/* Use ID Routing */
	MSG_ROUTING_BY_ID,

	/* Route as Broadcast Message from Root Complex */
	MSG_ROUTING_BCAST,

	/* Local message; terminate at receiver (INTx messages) */
	MSG_ROUTING_LOCAL,

	/* Gather & route to Root Complex (PME_TO_Ack message) */
	MSG_ROUTING_GATHER,
};

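/*
 * Illustrative sketch, not part of the original header: when an outbound
 * region has been configured for normal messages, the message routing and
 * message code are encoded in the low bits of the offset used for the
 * access, roughly as below (the "ep" and "msg_code" variables are assumed
 * to be provided by the caller):
 *
 *	u32 offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
 *		     CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
 *
 *	writel(0, ep->irq_cpu_addr + offset);
 */
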
struct cdns_pcie_ops {
	int (*start_link)(struct cdns_pcie *pcie);
	void (*stop_link)(struct cdns_pcie *pcie);
	bool (*link_up)(struct cdns_pcie *pcie);
	u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};

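/*
 * Illustrative sketch, not part of the original header: a platform glue
 * driver would typically provide one cdns_pcie_ops instance wired to its
 * own callbacks (the "my_pcie_*" names below are purely hypothetical):
 *
 *	static const struct cdns_pcie_ops my_pcie_ops = {
 *		.start_link = my_pcie_start_link,
 *		.stop_link  = my_pcie_stop_link,
 *		.link_up    = my_pcie_link_up,
 *	};
 */
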
/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @dev: PCIe controller
 * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
 * @phy_count: number of supported PHY devices
 * @phy: list of pointers to specific PHY control blocks
 * @link: list of pointers to corresponding device link representations
 * @ops: Platform-specific ops to control various inputs from Cadence PCIe
 *	 wrapper
 */
struct cdns_pcie {
	void __iomem *reg_base;
	struct resource *mem_res;
	struct device *dev;
	bool is_rc;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	const struct cdns_pcie_ops *ops;
};

/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *	     configuration space accesses
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *	      single function at a time
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @avail_ib_bar: marks whether RP_BAR0, RP_BAR1 and RP_NO_BAR are still
 *		  free (true) or already used for an inbound mapping (false)
 * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 */
struct cdns_pcie_rc {
	struct cdns_pcie pcie;
	struct resource *cfg_res;
	void __iomem *cfg_base;
	u32 vendor_id;
	u32 device_id;
	bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
};

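/*
 * Illustrative sketch, not part of the original header: given a struct
 * cdns_pcie_rc allocated by the host platform driver, the usual flow is to
 * fill in the Cadence core fields (device, register/config windows, ops)
 * and then call cdns_pcie_host_setup(), declared further down.  The "rc",
 * "dev" and "my_pcie_ops" names are assumptions for the sake of the
 * example:
 *
 *	rc->pcie.dev = dev;
 *	rc->pcie.ops = &my_pcie_ops;
 *	ret = cdns_pcie_host_setup(rc);
 */
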
/**
 * struct cdns_pcie_epf - Structure to hold info about endpoint function
 * @epf: Info about virtual functions attached to the physical function
 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
 */
struct cdns_pcie_epf {
	struct cdns_pcie_epf *epf;
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (INTX
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/INTX IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted INTX IRQs.
 * @lock: spin lock to disable interrupts while modifying PCIe controller
 *	  register fields (RMW) accessible by both remote RC and EP to
 *	  minimize time between read and write
 * @epf: Structure to hold info about endpoint function
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
 */
struct cdns_pcie_ep {
	struct cdns_pcie pcie;
	u32 max_regions;
	unsigned long ob_region_map;
	phys_addr_t *ob_addr;
	phys_addr_t irq_phys_addr;
	void __iomem *irq_cpu_addr;
	u64 irq_pci_addr;
	u8 irq_pci_fn;
	u8 irq_pending;
	/* protect writing to PCI_STATUS while raising INTX interrupts */
	spinlock_t lock;
	struct cdns_pcie_epf *epf;
	unsigned int quirk_detect_quiet_flag:1;
	unsigned int quirk_disable_flr:1;
};

/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
	writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}

static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 val = readl(aligned_addr);

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return 0;
	}

	if (size > 2)
		return val;

	return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
}

static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 mask;
	u32 val;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return;
	}

	if (size > 2) {
		writel(value, addr);
		return;
	}

	mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
	val = readl(aligned_addr) & mask;
	val |= value << (offset * 8);
	writel(val, aligned_addr);
}

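/*
 * Illustrative sketch, not part of the original header: the sub-word
 * helpers above emulate 1- and 2-byte accesses on top of aligned 32-bit
 * readl()/writel().  For instance, a 1-byte write to an address whose
 * offset within its aligned word is 2 keeps bytes 0, 1 and 3 and replaces
 * byte 2:
 *
 *	mask = ~(0xff << 16);
 *	val = (readl(aligned_addr) & mask) | (value << 16);
 *	writel(val, aligned_addr);
 */
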
/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
				       u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
				       u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u32 value)
{
	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->start_link)
		return pcie->ops->start_link(pcie);

	return 0;
}

static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->stop_link)
		pcie->ops->stop_link(pcie);
}

static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
	if (pcie->ops->link_up)
		return pcie->ops->link_up(pcie);

	return true;
}

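/*
 * Illustrative sketch, not part of the original header: code that needs to
 * wait for the link typically polls cdns_pcie_link_up() using the
 * LINK_WAIT_* parameters defined at the top of this file, roughly:
 *
 *	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 *		if (cdns_pcie_link_up(pcie))
 *			return 0;
 *		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 *	}
 *	return -ETIMEDOUT;
 */
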
#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where);
#else
static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
{
}

static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
					     int where)
{
	return NULL;
}
#endif

#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}

static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
{
}
#endif

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size);

void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr);

void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
extern const struct dev_pm_ops cdns_pcie_pm_ops;

#endif /* _PCIE_CADENCE_H */