// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"
/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_SSDEV_ID_REG 0x2c
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)

/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
#define PIO_CTRL_TYPE_MASK GENMASK(3, 0)
#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24)
#define PIO_STAT (PIO_BASE_ADDR + 0x4)
#define PIO_COMPLETION_STATUS_SHIFT 7
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_RRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18)
#define PIO_START (PIO_BASE_ADDR + 0x1c)
#define PIO_ISR (PIO_BASE_ADDR + 0x20)
#define PIO_ISRM (PIO_BASE_ADDR + 0x24)
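
/*
 * PIO transfer flow (see advk_pcie_rd_conf()/advk_pcie_wr_conf() below):
 * program the TLP type in PIO_CTRL and the target in PIO_ADDR_LS/MS, set
 * up PIO_WR_DATA/PIO_WR_DATA_STRB for writes, clear PIO_ISR, write 1 to
 * PIO_START, then poll PIO_ISR for completion. PIO_STAT carries the
 * completion status and PIO_RD_DATA the read result.
 */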

/* Aardvark Control registers */
#define CONTROL_BASE_ADDR 0x4800
#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0)
#define PCIE_GEN_SEL_MSK 0x3
#define PCIE_GEN_SEL_SHIFT 0x0
#define SPEED_GEN_1 0
#define SPEED_GEN_2 1
#define SPEED_GEN_3 2
#define IS_RC_MSK 1
#define IS_RC_SHIFT 2
#define LANE_CNT_MSK 0x18
#define LANE_CNT_SHIFT 0x3
#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT)
#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT)
#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT)
#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT)
#define LINK_TRAINING_EN BIT(6)
#define LEGACY_INTA BIT(28)
#define LEGACY_INTB BIT(29)
#define LEGACY_INTC BIT(30)
#define LEGACY_INTD BIT(31)
#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4)
#define HOT_RESET_GEN BIT(0)
#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8)
#define PCIE_CORE_CTRL2_RESERVED 0x7
#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4)
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
#define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
#define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
#define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2)
#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK BIT(7)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_CORR_ERR BIT(11)
#define PCIE_ISR0_NFAT_ERR BIT(12)
#define PCIE_ISR0_FAT_ERR BIT(13)
#define PCIE_ISR0_ERR_MASK GENMASK(13, 11)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
#define PCIE_ISR1_FLUSH BIT(5)
#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
#define PCIE_ISR1_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)

/* PCIe window configuration */
#define OB_WIN_BASE_ADDR 0x4c00
#define OB_WIN_BLOCK_SIZE 0x20
#define OB_WIN_COUNT 8
#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
                                      OB_WIN_BLOCK_SIZE * (win) + \
                                      (offset))
#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
#define OB_WIN_ENABLE BIT(0)
#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
#define OB_WIN_FUNC_NUM_SHIFT 24
#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
#define OB_WIN_BUS_NUM_BITS_SHIFT 20
#define OB_WIN_MSG_CODE_ENABLE BIT(22)
#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
#define OB_WIN_MSG_CODE_SHIFT 14
#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
#define OB_WIN_ATTR_ENABLE BIT(11)
#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
#define OB_WIN_ATTR_TC_SHIFT 8
#define OB_WIN_ATTR_RELAXED BIT(7)
#define OB_WIN_ATTR_NOSNOOP BIT(6)
#define OB_WIN_ATTR_POISON BIT(5)
#define OB_WIN_ATTR_IDO BIT(4)
#define OB_WIN_TYPE_MASK GENMASK(3, 0)
#define OB_WIN_TYPE_SHIFT 0
#define OB_WIN_TYPE_MEM 0x0
#define OB_WIN_TYPE_IO 0x4
#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
#define OB_WIN_TYPE_MSG 0xc
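
/*
 * Each of the OB_WIN_COUNT outbound windows is programmed via its own
 * 0x20-byte register block: a 64-bit MATCH base (with OB_WIN_ENABLE in
 * the low register), a 64-bit REMAP target, a 64-bit MASK selecting the
 * window size, and an ACTIONS word choosing the TLP type and attributes.
 * Accesses not matching any enabled window fall back to the "Default
 * User Field" (OB_WIN_DEFAULT_ACTIONS) set up in advk_pcie_setup_hw().
 */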

/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
#define LTSSM_SHIFT 24
#define LTSSM_MASK 0x3f
#define RC_BAR_CONFIG 0x300

/* LTSSM values in CFG_REG */
enum {
        LTSSM_DETECT_QUIET = 0x0,
        LTSSM_DETECT_ACTIVE = 0x1,
        LTSSM_POLLING_ACTIVE = 0x2,
        LTSSM_POLLING_COMPLIANCE = 0x3,
        LTSSM_POLLING_CONFIGURATION = 0x4,
        LTSSM_CONFIG_LINKWIDTH_START = 0x5,
        LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
        LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
        LTSSM_CONFIG_LANENUM_WAIT = 0x8,
        LTSSM_CONFIG_COMPLETE = 0x9,
        LTSSM_CONFIG_IDLE = 0xa,
        LTSSM_RECOVERY_RCVR_LOCK = 0xb,
        LTSSM_RECOVERY_SPEED = 0xc,
        LTSSM_RECOVERY_RCVR_CFG = 0xd,
        LTSSM_RECOVERY_IDLE = 0xe,
        LTSSM_L0 = 0x10,
        LTSSM_RX_L0S_ENTRY = 0x11,
        LTSSM_RX_L0S_IDLE = 0x12,
        LTSSM_RX_L0S_FTS = 0x13,
        LTSSM_TX_L0S_ENTRY = 0x14,
        LTSSM_TX_L0S_IDLE = 0x15,
        LTSSM_TX_L0S_FTS = 0x16,
        LTSSM_L1_ENTRY = 0x17,
        LTSSM_L1_IDLE = 0x18,
        LTSSM_L2_IDLE = 0x19,
        LTSSM_L2_TRANSMIT_WAKE = 0x1a,
        LTSSM_DISABLED = 0x20,
        LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
        LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
        LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
        LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
        LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
        LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
        LTSSM_HOT_RESET = 0x27,
        LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
        LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
        LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
        LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
};
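
/*
 * The link-state helpers below carve this space into three ranges:
 * [LTSSM_L0, LTSSM_DISABLED) means the link is up, [LTSSM_CONFIG_IDLE,
 * LTSSM_DISABLED) means the Data Link Layer is active, and the
 * Configuration/Recovery ranges mean link training is in progress.
 */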

#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
#define CTRL_MODE_SHIFT 0x0
#define CTRL_MODE_MASK 0x1
#define PCIE_CORE_MODE_DIRECT 0x0
#define PCIE_CORE_MODE_COMMAND 0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR 0x1b000
#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4)
#define PCIE_IRQ_CMDQ_INT BIT(0)
#define PCIE_IRQ_MSI_STATUS_INT BIT(1)
#define PCIE_IRQ_CMD_SENT_DONE BIT(3)
#define PCIE_IRQ_DMA_INT BIT(4)
#define PCIE_IRQ_IB_DXFERDONE BIT(5)
#define PCIE_IRQ_OB_DXFERDONE BIT(6)
#define PCIE_IRQ_OB_RXFERDONE BIT(7)
#define PCIE_IRQ_COMPQ_INT BIT(12)
#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13)
#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14)
#define PCIE_IRQ_CORE_INT BIT(16)
#define PCIE_IRQ_CORE_INT_PIO BIT(17)
#define PCIE_IRQ_DPMU_INT BIT(18)
#define PCIE_IRQ_PCIE_MIS_INT BIT(19)
#define PCIE_IRQ_MSI_INT1_DET BIT(20)
#define PCIE_IRQ_MSI_INT2_DET BIT(21)
#define PCIE_IRQ_RC_DBELL_DET BIT(22)
#define PCIE_IRQ_EP_STATUS BIT(23)
#define PCIE_IRQ_ALL_MASK GENMASK(31, 0)
#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT

/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0 0x8
#define PCIE_CONFIG_RD_TYPE1 0x9
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
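
/*
 * The values above are loaded into the PIO_CTRL type field. Type 0 config
 * requests target devices on the bus directly below the Root Port; Type 1
 * requests are forwarded by intermediate bridges (see advk_pcie_rd_conf()
 * and advk_pcie_wr_conf()).
 */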

#define PIO_RETRY_CNT 750000 /* 1.5 s */
#define PIO_RETRY_DELAY 2 /* 2 us */

#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
#define RETRAIN_WAIT_MAX_RETRIES 10
#define RETRAIN_WAIT_USLEEP_US 2000

#define MSI_IRQ_NUM 32

#define CFG_RD_RRS_VAL 0xffff0001
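
/*
 * Value returned for a config read that completed with Configuration
 * Request Retry Status when RRS Software Visibility is enabled: 0x0001 in
 * the Vendor ID field and all ones in the remaining bytes, as required by
 * PCIe r6.0, sec 2.3.2.
 */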

struct advk_pcie {
        struct platform_device *pdev;
        void __iomem *base;
        struct {
                phys_addr_t match;
                phys_addr_t remap;
                phys_addr_t mask;
                u32 actions;
        } wins[OB_WIN_COUNT];
        u8 wins_count;
        struct irq_domain *rp_irq_domain;
        struct irq_domain *irq_domain;
        struct irq_chip irq_chip;
        raw_spinlock_t irq_lock;
        struct irq_domain *msi_domain;
        struct irq_domain *msi_inner_domain;
        raw_spinlock_t msi_irq_lock;
        DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
        struct mutex msi_used_lock;
        int link_gen;
        struct pci_bridge_emul bridge;
        struct gpio_desc *reset_gpio;
        struct phy *phy;
};

static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
        writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
        return readl(pcie->base + reg);
}

static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
        u32 val;
        u8 ltssm_state;

        val = advk_readl(pcie, CFG_REG);
        ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
        return ltssm_state;
}

static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
        /* check if LTSSM is in normal operation - some L* state */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
{
        /*
         * According to PCIe Base specification 3.0, Table 4-14: Link
         * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle,
         * Link Up is mapped to the LTSSM Configuration.Idle, Recovery,
         * L0, L0s, L1 and L2 states. And according to 3.2.1. Data Link
         * Control and Management State Machine Rules, DL Up status is
         * reported in the DL Active state.
         */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
{
        /*
         * According to PCIe Base specification 3.0, Table 4-14: Link
         * Status Mapped to the LTSSM, Link Training is mapped to the
         * LTSSM Configuration and Recovery states.
         */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
                 ltssm_state < LTSSM_L0) ||
                (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
                 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}

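/*
 * Poll for link up for roughly LINK_WAIT_MAX_RETRIES * 100 ms, i.e. at
 * least 900 ms in total; advk_pcie_train_link() relies on this bound to
 * satisfy the 100 ms post-reset delay required before config requests.
 */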
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
        int retries;

        /* check if the link is up or not */
        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
                if (advk_pcie_link_up(pcie))
                        return 0;

                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
        }

        return -ETIMEDOUT;
}

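/*
 * Wait (up to RETRAIN_WAIT_MAX_RETRIES * 2 ms = 20 ms) for the LTSSM to
 * enter a link-training state after retraining was requested, so that a
 * subsequent LNKSTA read observes PCI_EXP_LNKSTA_LT (see
 * advk_pci_bridge_emul_pcie_conf_write()).
 */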
static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
{
        size_t retries;

        for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
                if (advk_pcie_link_training(pcie))
                        break;
                udelay(RETRAIN_WAIT_USLEEP_US);
        }
}

static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
        if (!pcie->reset_gpio)
                return;

        /* 10ms delay is needed for some cards */
        dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
        gpiod_set_value_cansleep(pcie->reset_gpio, 1);
        usleep_range(10000, 11000);
        gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static void advk_pcie_train_link(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        u32 reg;
        int ret;

        /*
         * Setup PCIe rev / gen compliance based on device tree property
         * 'max-link-speed' which also forces maximal link speed.
         */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg &= ~PCIE_GEN_SEL_MSK;
        if (pcie->link_gen == 3)
                reg |= SPEED_GEN_3;
        else if (pcie->link_gen == 2)
                reg |= SPEED_GEN_2;
        else
                reg |= SPEED_GEN_1;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Set maximal link speed value also into PCIe Link Control 2 register.
         * Armada 3700 Functional Specification says that default value is based
         * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
         */
        reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
        reg &= ~PCI_EXP_LNKCTL2_TLS;
        if (pcie->link_gen == 3)
                reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
        else if (pcie->link_gen == 2)
                reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
        else
                reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
        advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);

        /* Enable link training after selecting PCIe generation */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg |= LINK_TRAINING_EN;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Reset PCIe card via PERST# signal. Some cards are not detected
         * during link training when they are in some non-initial state.
         */
        advk_pcie_issue_perst(pcie);

        /*
         * PERST# signal could have been asserted by pinctrl subsystem before
         * probe() callback has been called or issued explicitly by reset gpio
         * function advk_pcie_issue_perst(), putting the endpoint into
         * fundamental reset. As required by PCI Express spec (PCI Express
         * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
         * Conventional Reset) a delay of at least 100 ms after such a reset
         * is needed before sending a Configuration Request to the device.
         * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
         * waits for link at least 900ms.
         */
        ret = advk_pcie_wait_for_link(pcie);
        if (ret < 0)
                dev_err(dev, "link never came up\n");
        else
                dev_info(dev, "link up\n");
}

/*
 * Set PCIe address window register which could be used for memory
 * mapping.
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
                                 phys_addr_t match, phys_addr_t remap,
                                 phys_addr_t mask, u32 actions)
{
        advk_writel(pcie, OB_WIN_ENABLE |
                          lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
        advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
        advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
        advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
        advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
        advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
        advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
        advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
        phys_addr_t msi_addr;
        u32 reg;
        int i;

        /*
         * Configure PCIe Reference clock. Direction is from the PCIe
         * controller to the endpoint card, so enable transmitting of
         * Reference clock differential signal off-chip and disable
         * receiving off-chip differential signal.
         */
        reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
        reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
        reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

        /* Set to Direct mode */
        reg = advk_readl(pcie, CTRL_CONFIG_REG);
        reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
        reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
        advk_writel(pcie, reg, CTRL_CONFIG_REG);

        /* Set PCI global control register to RC mode */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg |= (IS_RC_MSK << IS_RC_SHIFT);
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
         * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
         * id in high 16 bits. Updating this register changes readback value of
         * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
         * for erratum 4.1: "The value of device and vendor ID is incorrect".
         */
        reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
        advk_writel(pcie, reg, VENDOR_ID_REG);

        /*
         * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
         * because the default value is Mass storage controller (0x010400).
         *
         * Note that this Aardvark PCI Bridge does not have compliant Type 1
         * Configuration Space and it cannot even be accessed via Aardvark's
         * PCI config space access method. Something like config space is
         * available in internal Aardvark registers starting at offset 0x0
         * and is reported as Type 0. In range 0x10 - 0x34 it has totally
         * different registers.
         *
         * Therefore the driver uses a PCI Bridge emulation which provides
         * access to configuration space via internal Aardvark registers or
         * an emulated configuration buffer.
         */
        reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
        reg &= ~0xffffff00;
        reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
        advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);

        /* Disable Root Bridge I/O space, memory space and bus mastering */
        reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
        reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
        advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);

        /* Set Advanced Error Capabilities and Control PF0 register */
        reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
        advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

        /* Set PCIe Device Control register */
        reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
        reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
        reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
        reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
        reg &= ~PCI_EXP_DEVCTL_READRQ;
        reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
        reg |= PCI_EXP_DEVCTL_READRQ_512B;
        advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

        /* Program PCIe Control 2 to disable strict ordering */
        reg = PCIE_CORE_CTRL2_RESERVED |
              PCIE_CORE_CTRL2_TD_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /* Set lane X1 */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg &= ~LANE_CNT_MSK;
        reg |= LANE_COUNT_1;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /* Set MSI address */
        msi_addr = virt_to_phys(pcie);
        advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
        advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);

        /* Enable MSI */
        reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
        reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /* Clear all interrupts */
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
        advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

        /* Disable All ISR0/1 and MSI Sources */
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

        /* Unmask summary MSI interrupt */
        reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
        reg &= ~PCIE_ISR0_MSI_INT_PENDING;
        advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

        /* Unmask PME interrupt for processing of PME requester */
        reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
        reg &= ~PCIE_MSG_PM_PME_MASK;
        advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

        /* Enable summary interrupt for GIC SPI source */
        reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
        advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

        /*
         * Enable AXI address window location generation:
         * When it is enabled, the default outbound window
         * configurations (Default User Field: 0xD0074CFC)
         * are used for transparent address translation of
         * the outbound transactions. Thus, PCIe address
         * windows are not required for transparent memory
         * access when the default outbound window configuration
         * is set for memory access.
         */
        reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
        reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /*
         * Set memory access in Default User Field so it
         * is not required to configure a PCIe address
         * window for transparent memory access.
         */
        advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

        /*
         * Bypass the address window mapping for PIO:
         * Since a PIO access already carries all required
         * info over the AXI interface in the PIO registers,
         * the address window is not required.
         */
        reg = advk_readl(pcie, PIO_CTRL);
        reg |= PIO_CTRL_ADDR_WIN_DISABLE;
        advk_writel(pcie, reg, PIO_CTRL);

        /*
         * Configure PCIe address windows for non-memory or
         * non-transparent access, as by default PCIe uses
         * transparent memory access.
         */
        for (i = 0; i < pcie->wins_count; i++)
                advk_pcie_set_ob_win(pcie, i,
                                     pcie->wins[i].match, pcie->wins[i].remap,
                                     pcie->wins[i].mask, pcie->wins[i].actions);

        /* Disable remaining PCIe outbound windows */
        for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
                advk_pcie_disable_ob_win(pcie, i);

        advk_pcie_train_link(pcie);
}

static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
        struct device *dev = &pcie->pdev->dev;
        u32 reg;
        unsigned int status;
        char *strcomp_status, *str_posted;
        int ret;

        reg = advk_readl(pcie, PIO_STAT);
        status = (reg & PIO_COMPLETION_STATUS_MASK) >>
                PIO_COMPLETION_STATUS_SHIFT;

        /*
         * According to HW spec, the PIO status check sequence is as below:
         * 1) even if COMPLETION_STATUS(bit9:7) indicates successful,
         *    it still needs to check Error Status(bit11); only when this bit
         *    indicates that no error happened is the operation successful.
         * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
         *    means a PIO write error, and for PIO read it is successful with
         *    a read value of 0xFFFFFFFF.
         * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
         *    only means a PIO write error, and for PIO read it is successful
         *    with a read value of 0xFFFF0001.
         * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
         *    error for both PIO read and PIO write operation.
         * 5) other errors are indicated as 'unknown'.
         */
        switch (status) {
        case PIO_COMPLETION_STATUS_OK:
                if (reg & PIO_ERR_STATUS) {
                        strcomp_status = "COMP_ERR";
                        ret = -EFAULT;
                        break;
                }
                /* Get the read result */
                if (val)
                        *val = advk_readl(pcie, PIO_RD_DATA);
                /* No error */
                strcomp_status = NULL;
                ret = 0;
                break;
        case PIO_COMPLETION_STATUS_UR:
                strcomp_status = "UR";
                ret = -EOPNOTSUPP;
                break;
        case PIO_COMPLETION_STATUS_RRS:
                if (allow_rrs && val) {
                        /* PCIe r6.0, sec 2.3.2, says:
                         * If Configuration RRS Software Visibility is enabled:
                         * For a Configuration Read Request that includes both
                         * bytes of the Vendor ID field of a device Function's
                         * Configuration Space Header, the Root Complex must
                         * complete the Request to the host by returning a
                         * read-data value of 0001h for the Vendor ID field and
                         * all '1's for any additional bytes included in the
                         * request.
                         *
                         * So RRS in this case is not an error status.
                         */
                        *val = CFG_RD_RRS_VAL;
                        strcomp_status = NULL;
                        ret = 0;
                        break;
                }
                /* PCIe r6.0, sec 2.3.2, says:
                 * If RRS Software Visibility is not enabled, the Root Complex
                 * must re-issue the Configuration Request as a new Request.
                 * If RRS Software Visibility is enabled: For a Configuration
                 * Write Request or for any other Configuration Read Request,
                 * the Root Complex must re-issue the Configuration Request as
                 * a new Request.
                 * A Root Complex implementation may choose to limit the number
                 * of Configuration Request/RRS Completion Status loops before
                 * determining that something is wrong with the target of the
                 * Request and taking appropriate action, e.g., complete the
                 * Request to the host as a failed transaction.
                 *
                 * So return -EAGAIN and caller (pci-aardvark.c driver) will
                 * re-issue request again up to the PIO_RETRY_CNT retries.
                 */
                strcomp_status = "RRS";
                ret = -EAGAIN;
                break;
        case PIO_COMPLETION_STATUS_CA:
                strcomp_status = "CA";
                ret = -ECANCELED;
                break;
        default:
                strcomp_status = "Unknown";
                ret = -EINVAL;
                break;
        }

        if (!strcomp_status)
                return ret;

        if (reg & PIO_NON_POSTED_REQ)
                str_posted = "Non-posted";
        else
                str_posted = "Posted";

        dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
                str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

        return ret;
}

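/*
 * Poll for PIO completion (PIO_START cleared and PIO_ISR set). On success
 * the return value is the number of polling iterations consumed (>= 1), so
 * that callers can charge them against PIO_RETRY_CNT across RRS retries;
 * -ETIMEDOUT is returned after PIO_RETRY_CNT * PIO_RETRY_DELAY = 1.5 s.
 */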
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        int i;

        for (i = 1; i <= PIO_RETRY_CNT; i++) {
                u32 start, isr;

                start = advk_readl(pcie, PIO_START);
                isr = advk_readl(pcie, PIO_ISR);
                if (!start && isr)
                        return i;
                udelay(PIO_RETRY_DELAY);
        }

        dev_err(dev, "PIO read/write transfer time out\n");
        return -ETIMEDOUT;
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
                                    int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_COMMAND:
                *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
                return PCI_BRIDGE_EMUL_HANDLED;

        case PCI_INTERRUPT_LINE: {
                /*
                 * From the whole 32-bit register only two bits are read
                 * from HW: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
                 * Other bits are retrieved only from the emulated config
                 * buffer.
                 */
                __le32 *cfgspace = (__le32 *)&bridge->conf;
                u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
                if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
                        val &= ~(PCI_BRIDGE_CTL_SERR << 16);
                else
                        val |= PCI_BRIDGE_CTL_SERR << 16;
                if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
                        val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
                else
                        val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}

static void
advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
                                     int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_COMMAND:
                advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
                break;

        case PCI_INTERRUPT_LINE:
                /*
                 * According to Figure 6-3: Pseudo Logic Diagram for Error
                 * Message Controls in PCIe base specification, the SERR#
                 * Enable bit in the Bridge Control register enables receiving
                 * of ERR_* messages.
                 */
                if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
                        u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
                        if (new & (PCI_BRIDGE_CTL_SERR << 16))
                                val &= ~PCIE_ISR0_ERR_MASK;
                        else
                                val |= PCIE_ISR0_ERR_MASK;
                        advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
                }
                if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
                        u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
                        if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
                                val |= HOT_RESET_GEN;
                        else
                                val &= ~HOT_RESET_GEN;
                        advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
                }
                break;

        default:
                break;
        }
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
                                    int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        /*
         * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
         * also supported, but do not need to be handled here, because their
         * values are stored in emulated config space buffer, and we read them
         * from there when needed.
         */

        case PCI_EXP_LNKCAP: {
                u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
                /*
                 * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
                 * But support for PCI_EXP_LNKSTA_DLLLA is emulated via LTSSM
                 * state, so explicitly enable the PCI_EXP_LNKCAP_DLLLARC flag.
                 */
                val |= PCI_EXP_LNKCAP_DLLLARC;
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        case PCI_EXP_LNKCTL: {
                /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
                u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
                        ~(PCI_EXP_LNKSTA_LT << 16);
                if (advk_pcie_link_training(pcie))
                        val |= (PCI_EXP_LNKSTA_LT << 16);
                if (advk_pcie_link_active(pcie))
                        val |= (PCI_EXP_LNKSTA_DLLLA << 16);
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
                *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
                return PCI_BRIDGE_EMUL_HANDLED;

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}

static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
                                     int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_EXP_LNKCTL:
                advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
                if (new & PCI_EXP_LNKCTL_RL)
                        advk_pcie_wait_for_retrain(pcie);
                break;

        case PCI_EXP_RTCTL: {
                u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
                /* Only emulation of PMEIE and RRS_SVE bits is provided */
                rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
                bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
                break;
        }

        /*
         * PCI_EXP_RTSTA is also supported, but does not need to be handled
         * here, because its value is stored in emulated config space buffer,
         * and we write it there when needed.
         */

        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCTL2:
                advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
                break;

        default:
                break;
        }
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
                                   int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case 0:
                *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);

                /*
                 * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada
                 * 3700 Functional Specification does not document registers
                 * at those addresses.
                 *
                 * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error
                 * Reporting Capability header the last Extended Capability.
                 * If we obtain documentation for those registers in the
                 * future, this can be changed.
                 */
                *value &= 0x000fffff;
                return PCI_BRIDGE_EMUL_HANDLED;

        case PCI_ERR_UNCOR_STATUS:
        case PCI_ERR_UNCOR_MASK:
        case PCI_ERR_UNCOR_SEVER:
        case PCI_ERR_COR_STATUS:
        case PCI_ERR_COR_MASK:
        case PCI_ERR_CAP:
        case PCI_ERR_HEADER_LOG + 0:
        case PCI_ERR_HEADER_LOG + 4:
        case PCI_ERR_HEADER_LOG + 8:
        case PCI_ERR_HEADER_LOG + 12:
        case PCI_ERR_ROOT_COMMAND:
        case PCI_ERR_ROOT_STATUS:
        case PCI_ERR_ROOT_ERR_SRC:
                *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
                return PCI_BRIDGE_EMUL_HANDLED;

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}

static void
advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
                                    int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        /* These are W1C registers, so clear other bits */
        case PCI_ERR_UNCOR_STATUS:
        case PCI_ERR_COR_STATUS:
        case PCI_ERR_ROOT_STATUS:
                new &= mask;
                fallthrough;

        case PCI_ERR_UNCOR_MASK:
        case PCI_ERR_UNCOR_SEVER:
        case PCI_ERR_COR_MASK:
        case PCI_ERR_CAP:
        case PCI_ERR_HEADER_LOG + 0:
        case PCI_ERR_HEADER_LOG + 4:
        case PCI_ERR_HEADER_LOG + 8:
        case PCI_ERR_HEADER_LOG + 12:
        case PCI_ERR_ROOT_COMMAND:
        case PCI_ERR_ROOT_ERR_SRC:
                advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
                break;

        default:
                break;
        }
}

static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
        .read_base = advk_pci_bridge_emul_base_conf_read,
        .write_base = advk_pci_bridge_emul_base_conf_write,
        .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
        .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
        .read_ext = advk_pci_bridge_emul_ext_conf_read,
        .write_ext = advk_pci_bridge_emul_ext_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
        struct pci_bridge_emul *bridge = &pcie->bridge;

        bridge->conf.vendor =
                cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
        bridge->conf.device =
                cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
        bridge->conf.class_revision =
                cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

        /* Support 32-bit I/O addressing */
        bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
        bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

        /* Support 64-bit prefetchable memory */
        bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
        bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

        /* Support interrupt A for MSI feature */
        bridge->conf.intpin = PCI_INTERRUPT_INTA;

        /*
         * Aardvark HW provides PCIe Capability structure in version 2 and
         * indicates slot support, which is emulated.
         */
        bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);

        /*
         * Set Presence Detect State bit permanently since there is no support
         * for unplugging the card nor detecting whether it is plugged. (If a
         * platform exists in the future that supports it, via a GPIO for
         * example, it should be implemented via this bit.)
         *
         * Set physical slot number to 1 since there is only one port and zero
         * value is reserved for ports within the same silicon as Root Port
         * which is not our case.
         */
        bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
                                                           1));
        bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

        /* Indicate support for Configuration Request Retry Status */
        bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);

        bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
        bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
        bridge->has_pcie = true;
        bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
        bridge->data = pcie;
        bridge->ops = &advk_pci_bridge_emul_ops;

        return pci_bridge_emul_init(bridge, 0);
}

static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
                                   int devfn)
{
        if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
                return false;

        /*
         * If the link goes down after we check for link-up, we have a problem:
         * if a PIO request is executed while link-down, the whole controller
         * gets stuck in a non-functional state, and even after link comes up
         * again, PIO requests won't work anymore, and a reset of the whole PCIe
         * controller is needed. Therefore we need to prevent sending PIO
         * requests while the link is down.
         */
        if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
                return false;

        return true;
}

static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;

        /*
         * Trying to start a new PIO transfer when the previous one has not
         * completed causes an External Abort on the CPU, which results in a
         * kernel panic:
         *
         *	SError Interrupt on CPU0, code 0xbf000002 -- SError
         *	Kernel panic - not syncing: Asynchronous SError Interrupt
         *
         * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
         * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
         * concurrent calls at the same time. But because a PIO transfer may
         * take about 1.5 s when the link is down or the card is disconnected,
         * advk_pcie_wait_pio() may give up before the transfer has actually
         * completed.
         *
         * Some versions of ARM Trusted Firmware handle this External Abort at
         * EL3 level and mask it to prevent the kernel panic. Relevant TF-A commit:
         * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
         */
        if (advk_readl(pcie, PIO_START)) {
                dev_err(dev, "Previous PIO read/write transfer is still running\n");
                return true;
        }

        return false;
}

static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 *val)
{
        struct advk_pcie *pcie = bus->sysdata;
        int retry_count;
        bool allow_rrs;
        u32 reg;
        int ret;

        if (!advk_pcie_valid_device(pcie, bus, devfn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (pci_is_root_bus(bus))
                return pci_bridge_emul_conf_read(&pcie->bridge, where,
                                                 size, val);

        /*
         * Configuration Request Retry Status (RRS) can be returned only
         * when both bytes of PCI_VENDOR_ID are read at once and the
         * RRS_SVE flag on the Root Port is enabled.
         */
        allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
                    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
                     PCI_EXP_RTCTL_RRS_SVE);

        if (advk_pcie_pio_is_running(pcie))
                goto try_rrs;

        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
        reg &= ~PIO_CTRL_TYPE_MASK;
        if (pci_is_root_bus(bus->parent))
                reg |= PCIE_CONFIG_RD_TYPE0;
        else
                reg |= PCIE_CONFIG_RD_TYPE1;
        advk_writel(pcie, reg, PIO_CTRL);

        /* Program the address registers */
        reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
        advk_writel(pcie, reg, PIO_ADDR_LS);
        advk_writel(pcie, 0, PIO_ADDR_MS);

        /* Program the data strobe */
        advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

        retry_count = 0;
        do {
                /* Clear PIO DONE ISR and start the transfer */
                advk_writel(pcie, 1, PIO_ISR);
                advk_writel(pcie, 1, PIO_START);

                ret = advk_pcie_wait_pio(pcie);
                if (ret < 0)
                        goto try_rrs;

                retry_count += ret;

                /* Check PIO status and get the read result */
                ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
        } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

        if (ret < 0)
                goto fail;

        if (size == 1)
                *val = (*val >> (8 * (where & 3))) & 0xff;
        else if (size == 2)
                *val = (*val >> (8 * (where & 3))) & 0xffff;

        return PCIBIOS_SUCCESSFUL;

try_rrs:
        /*
         * If it is possible, return Configuration Request Retry Status so
         * that the caller tries to issue the request again instead of failing.
         */
        if (allow_rrs) {
                *val = CFG_RD_RRS_VAL;
                return PCIBIOS_SUCCESSFUL;
        }

fail:
        *val = 0xffffffff;
        return PCIBIOS_SET_FAILED;
}

static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 val)
{
        struct advk_pcie *pcie = bus->sysdata;
        u32 reg;
        u32 data_strobe = 0x0;
        int retry_count;
        int offset;
        int ret;

        if (!advk_pcie_valid_device(pcie, bus, devfn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (pci_is_root_bus(bus))
                return pci_bridge_emul_conf_write(&pcie->bridge, where,
                                                  size, val);

        if (where % size)
                return PCIBIOS_SET_FAILED;

        if (advk_pcie_pio_is_running(pcie))
                return PCIBIOS_SET_FAILED;

        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
        reg &= ~PIO_CTRL_TYPE_MASK;
        if (pci_is_root_bus(bus->parent))
                reg |= PCIE_CONFIG_WR_TYPE0;
        else
                reg |= PCIE_CONFIG_WR_TYPE1;
        advk_writel(pcie, reg, PIO_CTRL);

        /* Program the address registers */
        reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
        advk_writel(pcie, reg, PIO_ADDR_LS);
        advk_writel(pcie, 0, PIO_ADDR_MS);

        /* Calculate the write strobe */
        offset = where & 0x3;
        reg = val << (8 * offset);
        data_strobe = GENMASK(size - 1, 0) << offset;

        /* Program the data register */
        advk_writel(pcie, reg, PIO_WR_DATA);

        /* Program the data strobe */
        advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

        retry_count = 0;
        do {
                /* Clear PIO DONE ISR and start the transfer */
                advk_writel(pcie, 1, PIO_ISR);
                advk_writel(pcie, 1, PIO_START);

                ret = advk_pcie_wait_pio(pcie);
                if (ret < 0)
                        return PCIBIOS_SET_FAILED;

                retry_count += ret;

                ret = advk_pcie_check_pio_status(pcie, false, NULL);
        } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

        return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
}

static struct pci_ops advk_pcie_ops = {
        .read = advk_pcie_rd_conf,
        .write = advk_pcie_wr_conf,
};

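/*
 * The MSI doorbell address is the physical address of the advk_pcie
 * structure itself, as programmed into PCIE_MSI_ADDR_LOW/HIGH_REG in
 * advk_pcie_setup_hw(); the message data is the allocated hwirq, which
 * the summary handler reads back from PCIE_MSI_STATUS_REG.
 */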
static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
                                         struct msi_msg *msg)
{
        struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
        phys_addr_t msi_addr = virt_to_phys(pcie);

        msg->address_lo = lower_32_bits(msi_addr);
        msg->address_hi = upper_32_bits(msi_addr);
        msg->data = data->hwirq;
}

static void advk_msi_irq_mask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
        mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
        mask |= BIT(hwirq);
        advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_irq_unmask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
        mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
        mask &= ~BIT(hwirq);
        advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_top_irq_mask(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void advk_msi_top_irq_unmask(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

static struct irq_chip advk_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
        .irq_mask = advk_msi_irq_mask,
        .irq_unmask = advk_msi_irq_unmask,
};

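/*
 * Multi-MSI requires a naturally aligned, power-of-two block of vectors,
 * because the device ORs the vector index into the low bits of the MSI
 * data; bitmap_find_free_region() provides exactly such a block.
 */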
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
                                     unsigned int virq,
                                     unsigned int nr_irqs, void *args)
{
        struct advk_pcie *pcie = domain->host_data;
        int hwirq, i;

        mutex_lock(&pcie->msi_used_lock);
        hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
                                        order_base_2(nr_irqs));
        mutex_unlock(&pcie->msi_used_lock);
        if (hwirq < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &advk_msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);

        return 0;
}

static void advk_msi_irq_domain_free(struct irq_domain *domain,
                                     unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct advk_pcie *pcie = domain->host_data;

        mutex_lock(&pcie->msi_used_lock);
        bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
        mutex_unlock(&pcie->msi_used_lock);
}

static const struct irq_domain_ops advk_msi_domain_ops = {
        .alloc = advk_msi_irq_domain_alloc,
        .free = advk_msi_irq_domain_free,
};

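/*
 * INTx interrupts are masked and unmasked via the PCIE_ISR1 assert bits;
 * advk_pcie_irq_map() below registers them as level interrupts, since an
 * INTx source stays asserted until the device deasserts it.
 */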
static void advk_pcie_irq_mask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
        mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
        advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void advk_pcie_irq_unmask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
        mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
        advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static int advk_pcie_irq_map(struct irq_domain *h,
                             unsigned int virq, irq_hw_number_t hwirq)
{
        struct advk_pcie *pcie = h->host_data;

        irq_set_status_flags(virq, IRQ_LEVEL);
        irq_set_chip_and_handler(virq, &pcie->irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(virq, pcie);

        return 0;
}

static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
        .map = advk_pcie_irq_map,
        .xlate = irq_domain_xlate_onecell,
};

static struct irq_chip advk_msi_irq_chip = {
        .name = "advk-MSI",
        .irq_mask = advk_msi_top_irq_mask,
        .irq_unmask = advk_msi_top_irq_unmask,
};

static struct msi_domain_info advk_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                 MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
                 MSI_FLAG_PCI_MSIX,
        .chip = &advk_msi_irq_chip,
};

static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;

        raw_spin_lock_init(&pcie->msi_irq_lock);
        mutex_init(&pcie->msi_used_lock);

        pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM,
                                                          &advk_msi_domain_ops, pcie);
        if (!pcie->msi_inner_domain)
                return -ENOMEM;

        pcie->msi_domain =
                pci_msi_create_irq_domain(dev_fwnode(dev),
                                          &advk_msi_domain_info,
                                          pcie->msi_inner_domain);
        if (!pcie->msi_domain) {
                irq_domain_remove(pcie->msi_inner_domain);
                return -ENOMEM;
        }

        return 0;
}

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
        irq_domain_remove(pcie->msi_domain);
        irq_domain_remove(pcie->msi_inner_domain);
}

static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node;
        struct irq_chip *irq_chip;
        int ret = 0;

        raw_spin_lock_init(&pcie->irq_lock);

        pcie_intc_node = of_get_next_child(node, NULL);
        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
                return -ENODEV;
        }

        irq_chip = &pcie->irq_chip;

        irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
                                        dev_name(dev));
        if (!irq_chip->name) {
                ret = -ENOMEM;
                goto out_put_node;
        }

        irq_chip->irq_mask = advk_pcie_irq_mask;
        irq_chip->irq_unmask = advk_pcie_irq_unmask;

        pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
                                                    &advk_pcie_irq_domain_ops, pcie);
        if (!pcie->irq_domain) {
                dev_err(dev, "Failed to get an INTx IRQ domain\n");
                ret = -ENOMEM;
                goto out_put_node;
        }

out_put_node:
        of_node_put(pcie_intc_node);
        return ret;
}

static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
        irq_domain_remove(pcie->irq_domain);
}
1527 | |
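/*
 * The Root Port IRQ domain has a single hwirq (0), shared by PME and AER
 * events: the Aardvark HW returns zero for both PCI_EXP_FLAGS_IRQ and
 * PCI_ERR_ROOT_AER_IRQ, so everything is funnelled to PCIe interrupt 0.
 */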
static struct irq_chip advk_rp_irq_chip = {
	.name = "advk-RP",
};

static int advk_pcie_rp_irq_map(struct irq_domain *h,
				unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
	.map = advk_pcie_rp_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
	pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie);
	if (!pcie->rp_irq_domain) {
		dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->rp_irq_domain);
}

static void advk_pcie_handle_pme(struct advk_pcie *pcie)
{
	u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;

	advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);

	/*
	 * PCIE_MSG_LOG_REG contains the last inbound message, so store
	 * the requester ID only when PME was not already asserted.
	 * Also do not trigger the PME interrupt while PME is still asserted.
	 */
	if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
		pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);

		/*
		 * Trigger the PME interrupt only if the PMEIE bit in Root Control is set.
		 * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
		 */
		if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
			return;

		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
	}
}

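/*
 * Dispatch all pending, unmasked MSI vectors. Each vector's status bit
 * is acknowledged before its handler runs; the summary MSI bit in ISR0
 * is cleared last, after all vectors have been serviced.
 */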
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
	}

	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}

static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 isr0_val, isr0_mask, isr0_status;
	u32 isr1_val, isr1_mask, isr1_status;
	int i;

	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

	/* Process the PME interrupt first so as not to miss the PME requester ID */
	if (isr0_status & PCIE_MSG_PM_PME_MASK)
		advk_pcie_handle_pme(pcie);

	/* Process ERR interrupt */
	if (isr0_status & PCIE_ISR0_ERR_MASK) {
		advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);

		/*
		 * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
		 * PCIe interrupt 0
		 */
		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
	}

	/* Process MSI interrupts */
	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
			continue;

		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
			    PCIE_ISR1_REG);

		if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
					    (char)i + 'A');
	}
}

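/*
 * Top-level handler for the shared controller interrupt line: bail out
 * with IRQ_NONE if the controller did not raise the line, otherwise
 * dispatch core interrupts and acknowledge the summary bit.
 */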
static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}

static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct advk_pcie *pcie = dev->bus->sysdata;

	/*
	 * Emulated root bridge has its own emulated irq chip and irq domain.
	 * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
	 * hwirq for irq_create_mapping() is indexed from zero.
	 */
	if (pci_is_root_bus(dev->bus))
		return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
	else
		return of_irq_parse_and_map_pci(dev, slot, pin);
}

static void advk_pcie_disable_phy(struct advk_pcie *pcie)
{
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
}

static int advk_pcie_enable_phy(struct advk_pcie *pcie)
{
	int ret;

	if (!pcie->phy)
		return 0;

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	return 0;
}

static int advk_pcie_setup_phy(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	int ret = 0;

	pcie->phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
		return PTR_ERR(pcie->phy);

	/* Old bindings miss the PHY handle */
	if (IS_ERR(pcie->phy)) {
		dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
		pcie->phy = NULL;
		return 0;
	}

	ret = advk_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "Failed to initialize PHY (%d)\n", ret);

	return ret;
}

static int advk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct advk_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	int ret, irq;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	resource_list_for_each_entry(entry, &bridge->windows) {
		resource_size_t start = entry->res->start;
		resource_size_t size = resource_size(entry->res);
		unsigned long type = resource_type(entry->res);
		u64 win_size;

		/*
		 * Aardvark hardware also allows configuring PCIe windows for
		 * config type 0 and type 1 mapping, but this driver issues
		 * configuration transfers only via PIO, which does not use
		 * the PCIe window configuration.
		 */
		if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
			continue;

		/*
		 * Skip transparent memory resources. The default outbound
		 * access configuration is transparent memory access, so such
		 * resources need no window configuration.
		 */
		if (type == IORESOURCE_MEM && entry->offset == 0)
			continue;

		/*
		 * The n-th PCIe window is configured by the tuple (match,
		 * remap, mask) and an access to address A uses this window
		 * if (A & mask) == match.
		 * Therefore every PCIe window size must be a power of two and
		 * every start address must be aligned to the window size.
		 * The minimal size is 64 KiB because the lower 16 bits of the
		 * mask must be zero. The remap address may only have bits set
		 * that are also set in the mask.
		 */
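		/*
		 * Illustrative example (hypothetical addresses): a memory
		 * region starting at 0xe8000000 with size 0x1800000 is split
		 * into two windows. The first iteration picks
		 * min(largest power of two <= size, alignment of start) =
		 * min(0x1000000, 0x8000000) = 0x1000000, giving window
		 * [0xe8000000, 0xe9000000). The second iteration covers the
		 * remaining [0xe9000000, 0xe9800000) with a 0x800000 window,
		 * since the remaining size is exactly 0x800000.
		 */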
		while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
			/* Calculate the largest aligned window size */
			win_size = (1ULL << (fls64(size) - 1)) |
				   (start ? (1ULL << __ffs64(start)) : 0);
			win_size = 1ULL << __ffs64(win_size);
			if (win_size < 0x10000)
				break;

			dev_dbg(dev,
				"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
				pcie->wins_count, (unsigned long long)start,
				(unsigned long long)start + win_size, type);

			if (type == IORESOURCE_IO) {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
				pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
			} else {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
				pcie->wins[pcie->wins_count].match = start;
			}
			pcie->wins[pcie->wins_count].remap = start - entry->offset;
			pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

			if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
				break;

			start += win_size;
			size -= win_size;
			pcie->wins_count++;
		}

		if (size > 0) {
			dev_err(&pcie->pdev->dev,
				"Invalid PCIe region [0x%llx-0x%llx]\n",
				(unsigned long long)entry->res->start,
				(unsigned long long)entry->res->end + 1);
			return -EINVAL;
		}
	}

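	/* Map the controller registers (resource index 0 of the device) */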
	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
			       pcie);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset-gpio: %i\n", ret);
		return ret;
	}

	ret = gpiod_set_consumer_name(pcie->reset_gpio, "pcie1-reset");
	if (ret) {
		dev_err(dev, "Failed to set reset gpio name: %d\n", ret);
		return ret;
	}

	ret = of_pci_get_max_link_speed(dev->of_node);
	if (ret <= 0 || ret > 3)
		pcie->link_gen = 3;
	else
		pcie->link_gen = ret;

	ret = advk_pcie_setup_phy(pcie);
	if (ret)
		return ret;

	advk_pcie_setup_hw(pcie);

	ret = advk_sw_pci_bridge_init(pcie);
	if (ret) {
		dev_err(dev, "Failed to register emulated root PCI bridge\n");
		return ret;
	}

	ret = advk_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize INTx IRQ domain\n");
		return ret;
	}

	ret = advk_pcie_init_msi_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize MSI IRQ domain\n");
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	ret = advk_pcie_init_rp_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize Root Port IRQ domain\n");
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	bridge->sysdata = pcie;
	bridge->ops = &advk_pcie_ops;
	bridge->map_irq = advk_pcie_map_irq;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		advk_pcie_remove_rp_irq_domain(pcie);
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	return 0;
}

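/*
 * Tear down in roughly reverse order of probe: remove the PCI bus,
 * quiesce and mask the hardware, drop the IRQ domains and the emulated
 * bridge config space, assert PERST#, and disable the link and PHY.
 */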
static void advk_pcie_remove(struct platform_device *pdev)
{
	struct advk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 val;
	int i;

	/* Remove PCI bus with all devices */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	/* Disable Root Bridge I/O space, memory space and bus mastering */
	val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);

	/* Disable MSI */
	val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);

	/* Clear MSI address */
	advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);

	/* Mask all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Remove IRQ domains */
	advk_pcie_remove_rp_irq_domain(pcie);
	advk_pcie_remove_msi_irq_domain(pcie);
	advk_pcie_remove_irq_domain(pcie);

	/* Free config space for emulated root bridge */
	pci_bridge_emul_cleanup(&pcie->bridge);

	/* Assert PERST# signal which prepares PCIe card for power down */
	if (pcie->reset_gpio)
		gpiod_set_value_cansleep(pcie->reset_gpio, 1);

	/* Disable link training */
	val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	val &= ~LINK_TRAINING_EN;
	advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);

	/* Disable outbound address windows mapping */
	for (i = 0; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	/* Disable phy */
	advk_pcie_disable_phy(pcie);
}

static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);

static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
	},
	.probe = advk_pcie_probe,
	.remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);

MODULE_DESCRIPTION("Aardvark PCIe controller");
MODULE_LICENSE("GPL v2");