// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe Endpoint controller driver
 *
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Author: Siddartha Mohanadoss <smohanad@codeaurora.org>
 *
 * Copyright (c) 2021, Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/module.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_DB_CTRL				0x10
#define PARF_PM_CTRL				0x20
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_MHI_BASE_ADDR_LOWER		0x178
#define PARF_MHI_BASE_ADDR_UPPER		0x17c
#define PARF_DEBUG_INT_EN			0x190
#define PARF_AXI_MSTR_RD_HALT_NO_WRITES		0x1a4
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_CFG_BITS				0x210
#define PARF_INT_ALL_STATUS			0x224
#define PARF_INT_ALL_CLEAR			0x228
#define PARF_INT_ALL_MASK			0x22c
#define PARF_SLV_ADDR_MSB_CTRL			0x2c0
#define PARF_DBI_BASE_ADDR			0x350
#define PARF_DBI_BASE_ADDR_HI			0x354
#define PARF_SLV_ADDR_SPACE_SIZE		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI		0x35c
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_SRIS_MODE				0x644
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN			BIT(1)
#define PARF_INT_ALL_BME			BIT(2)
#define PARF_INT_ALL_PM_TURNOFF			BIT(3)
#define PARF_INT_ALL_DEBUG			BIT(4)
#define PARF_INT_ALL_LTR			BIT(5)
#define PARF_INT_ALL_MHI_Q6			BIT(6)
#define PARF_INT_ALL_MHI_A7			BIT(7)
#define PARF_INT_ALL_DSTATE_CHANGE		BIT(8)
#define PARF_INT_ALL_L1SUB_TIMEOUT		BIT(9)
#define PARF_INT_ALL_MMIO_WRITE			BIT(10)
#define PARF_INT_ALL_CFG_WRITE			BIT(11)
#define PARF_INT_ALL_BRIDGE_FLUSH_N		BIT(12)
#define PARF_INT_ALL_LINK_UP			BIT(13)
#define PARF_INT_ALL_AER_LEGACY			BIT(14)
#define PARF_INT_ALL_PLS_ERR			BIT(15)
#define PARF_INT_ALL_PME_LEGACY			BIT(16)
#define PARF_INT_ALL_PLS_PME			BIT(17)
#define PARF_INT_ALL_EDMA			BIT(22)

/* PARF_BDF_TO_SID_CFG register fields */
#define PARF_BDF_TO_SID_BYPASS			BIT(0)

/* PARF_DEBUG_INT_EN register fields */
#define PARF_DEBUG_INT_PM_DSTATE_CHANGE		BIT(1)
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN	BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF		BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP			0x0

/* PARF_PM_CTRL register fields */
#define PARF_PM_CTRL_REQ_EXIT_L1		BIT(1)
#define PARF_PM_CTRL_READY_ENTR_L23		BIT(2)
#define PARF_PM_CTRL_REQ_NOT_ENTR_L1		BIT(5)

/* PARF_MHI_CLOCK_RESET_CTRL fields */
#define PARF_MSTR_AXI_CLK_EN			BIT(1)

/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN	BIT(0)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define PARF_AXI_MSTR_WR_ADDR_HALT_EN		BIT(31)

/* PARF_Q2A_FLUSH register fields */
#define PARF_Q2A_FLUSH_EN			BIT(16)

/* PARF_SYS_CTRL register fields */
#define PARF_SYS_CTRL_AUX_PWR_DET		BIT(4)
#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS		BIT(6)
#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS		BIT(10)
#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE	BIT(11)

/* PARF_DB_CTRL register fields */
#define PARF_DB_CTRL_INSR_DBNCR_BLOCK		BIT(0)
#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK		BIT(1)
#define PARF_DB_CTRL_DBI_WKP_BLOCK		BIT(4)
#define PARF_DB_CTRL_SLV_WKP_BLOCK		BIT(5)
#define PARF_DB_CTRL_MST_WKP_BLOCK		BIT(6)

/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN	BIT(1)

/* ELBI registers */
#define ELBI_SYS_STTS				0x08
#define ELBI_CS2_ENABLE				0xa4

/* DBI registers */
#define DBI_CON_STATUS				0x44

/* DBI register fields */
#define DBI_CON_STATUS_POWER_STATE_MASK		GENMASK(1, 0)

#define XMLH_LINK_UP				0x400
#define CORE_RESET_TIME_US_MIN			1000
#define CORE_RESET_TIME_US_MAX			1005
#define WAKE_DELAY_US				2000 /* 2 ms */

#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

#define to_pcie_ep(x)				dev_get_drvdata((x)->dev)

enum qcom_pcie_ep_link_status {
	QCOM_PCIE_EP_LINK_DISABLED,
	QCOM_PCIE_EP_LINK_ENABLED,
	QCOM_PCIE_EP_LINK_UP,
	QCOM_PCIE_EP_LINK_DOWN,
};

/**
 * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
 * @pci: Designware PCIe controller struct
 * @parf: Qualcomm PCIe specific PARF register base
 * @elbi: Designware PCIe specific ELBI register base
 * @mmio: MMIO register base
 * @perst_map: PERST regmap
 * @mmio_res: MMIO region resource
 * @core_reset: PCIe Endpoint core reset
 * @reset: PERST# GPIO
 * @wake: WAKE# GPIO
 * @phy: PHY controller block
 * @debugfs: PCIe Endpoint Debugfs directory
 * @icc_mem: Handle to an interconnect path between PCIe and MEM
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count
 * @perst_en: Flag for PERST enable
 * @perst_sep_en: Flag for PERST separation enable
 * @link_status: PCIe Link status
 * @global_irq: Qualcomm PCIe specific Global IRQ
 * @perst_irq: PERST# IRQ
 */
struct qcom_pcie_ep {
	struct dw_pcie pci;

	void __iomem *parf;
	void __iomem *elbi;
	void __iomem *mmio;
	struct regmap *perst_map;
	struct resource *mmio_res;

	struct reset_control *core_reset;
	struct gpio_desc *reset;
	struct gpio_desc *wake;
	struct phy *phy;
	struct dentry *debugfs;

	struct icc_path *icc_mem;

	struct clk_bulk_data *clks;
	int num_clks;

	u32 perst_en;
	u32 perst_sep_en;

	enum qcom_pcie_ep_link_status link_status;
	int global_irq;
	int perst_irq;
};

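/* Pulse the PCIe core reset, with the required settling delay around each transition */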
static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
{
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_assert(pcie_ep->core_reset);
	if (ret) {
		dev_err(dev, "Cannot assert core reset\n");
		return ret;
	}

	usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);

	ret = reset_control_deassert(pcie_ep->core_reset);
	if (ret) {
		dev_err(dev, "Cannot de-assert core reset\n");
		return ret;
	}

	usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);

	return 0;
}

/*
 * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
 * device reset during host reboot and hibernation. The driver is
 * expected to handle this situation.
 */
static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
{
	if (pcie_ep->perst_map) {
		regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
		regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
	}
}

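/* Report link-up status based on the ELBI link status register */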
static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
	u32 reg;

	reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);

	return reg & XMLH_LINK_UP;
}

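/*
 * Link training is actually kicked off from the PERST# de-assertion handler,
 * so starting/stopping the link only arms/disarms the PERST# IRQ.
 */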
static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);

	enable_irq(pcie_ep->perst_irq);

	return 0;
}

static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);

	disable_irq(pcie_ep->perst_irq);
}

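/* DBI2 (shadow) register writes are performed with the ELBI CS2 window enabled */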
static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
				    u32 reg, size_t size, u32 val)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
	int ret;

	writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);

	writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
}

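/* Update the PCIe-MEM interconnect bandwidth vote to match the negotiated link speed and width */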
static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
{
	struct dw_pcie *pci = &pcie_ep->pci;
	u32 offset, status;
	int speed, width;
	int ret;

	if (!pcie_ep->icc_mem)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	ret = icc_set_bw(pcie_ep->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
	if (ret)
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
}

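/* Enable clocks, reset the core, bring up the PHY and place the initial interconnect bandwidth vote */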
static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
	struct dw_pcie *pci = &pcie_ep->pci;
	int ret;

	ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
	if (ret)
		return ret;

	ret = qcom_pcie_ep_core_reset(pcie_ep);
	if (ret)
		goto err_disable_clk;

	ret = phy_init(pcie_ep->phy);
	if (ret)
		goto err_disable_clk;

	ret = phy_set_mode_ext(pcie_ep->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_EP);
	if (ret)
		goto err_phy_exit;

	ret = phy_power_on(pcie_ep->phy);
	if (ret)
		goto err_phy_exit;

	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie_ep->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
		goto err_phy_off;
	}

	return 0;

err_phy_off:
	phy_power_off(pcie_ep->phy);
err_phy_exit:
	phy_exit(pcie_ep->phy);
err_disable_clk:
	clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);

	return ret;
}

static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
{
	icc_set_bw(pcie_ep->icc_mem, 0, 0);
	phy_power_off(pcie_ep->phy);
	phy_exit(pcie_ep->phy);
	clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
}

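/*
 * PERST# de-assertion: power up the controller, program PARF for endpoint
 * operation, complete the DWC endpoint initialization and start the LTSSM.
 */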
static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
	struct device *dev = pci->dev;
	u32 val, offset;
	int ret;

	ret = qcom_pcie_enable_resources(pcie_ep);
	if (ret) {
		dev_err(dev, "Failed to enable resources: %d\n", ret);
		return ret;
	}

	/* Assert WAKE# to RC to indicate device is ready */
	gpiod_set_value_cansleep(pcie_ep->wake, 1);
	usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
	gpiod_set_value_cansleep(pcie_ep->wake, 0);

	qcom_pcie_ep_configure_tcsr(pcie_ep);

	/* Disable BDF to SID mapping */
	val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
	val |= PARF_BDF_TO_SID_BYPASS;
	writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);

	/* Enable debug IRQ */
	val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
	val |= PARF_DEBUG_INT_RADM_PM_TURNOFF |
	       PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
	       PARF_DEBUG_INT_PM_DSTATE_CHANGE;
	writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);

	/* Configure PCIe to endpoint mode */
	writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);

	/* Allow entering L1 state */
	val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
	val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
	writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);

	/* Read halts write */
	val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
	val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);

	/* Write after write halt */
	val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

	/* Q2A flush disable */
	val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
	val &= ~PARF_Q2A_FLUSH_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);

	/*
	 * Disable Master AXI clock during idle. Do not allow DBI access
	 * to take the core out of L1. Disable core clock gating that
	 * gates PIPE clock from propagating to core clock. Report to the
	 * host that Vaux is present.
	 */
	val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
	val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS;
	val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
	       PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
	       PARF_SYS_CTRL_AUX_PWR_DET;
	writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);

	/* Disable the debouncers */
	val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
	val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
	       PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
	       PARF_DB_CTRL_MST_WKP_BLOCK;
	writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);

	/* Request to exit from L1SS for MSI and LTR MSG */
	val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
	val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);

	dw_pcie_dbi_ro_wr_en(pci);

	/* Set the L0s Exit Latency to 2us-4us = 0x6 */
	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_L0SEL;
	val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);

	/* Set the L1 Exit Latency to be 32us-64us = 0x6 */
	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_L1EL;
	val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
	val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
	      PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
	      PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
	writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);

	ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto err_disable_resources;
	}

	/*
	 * The physical address of the MMIO region which is exposed as the BAR
	 * should be written to MHI BASE registers.
	 */
	writel_relaxed(pcie_ep->mmio_res->start,
		       pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
	writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);

	/* Gate Master AXI clock to MHI bus during L1SS */
	val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val &= ~PARF_MSTR_AXI_CLK_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);

	dw_pcie_ep_init_notify(&pcie_ep->pci.ep);

	/* Enable LTSSM */
	val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
	val |= BIT(8);
	writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);

	return 0;

err_disable_resources:
	qcom_pcie_disable_resources(pcie_ep);

	return ret;
}

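/* PERST# assertion: the host has put the device into reset, so release all resources */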
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
	struct device *dev = pci->dev;

	if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
		dev_dbg(dev, "Link is already disabled\n");
		return;
	}

	qcom_pcie_disable_resources(pcie_ep);
	pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}

/* Common DWC controller ops */
static const struct dw_pcie_ops pci_ops = {
	.link_up = qcom_pcie_dw_link_up,
	.start_link = qcom_pcie_dw_start_link,
	.stop_link = qcom_pcie_dw_stop_link,
	.write_dbi2 = qcom_pcie_dw_write_dbi2,
};

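/* Map the PARF, DBI, ELBI and MMIO regions and parse the optional TCSR syscon used for PERST# separation */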
static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
					 struct qcom_pcie_ep *pcie_ep)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device_node *syscon;
	struct resource *res;
	int ret;

	pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie_ep->parf))
		return PTR_ERR(pcie_ep->parf);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);
	pci->dbi_base2 = pci->dbi_base;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie_ep->elbi))
		return PTR_ERR(pcie_ep->elbi);

	pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							 "mmio");
	if (!pcie_ep->mmio_res) {
		dev_err(dev, "Failed to get mmio resource\n");
		return -EINVAL;
	}

	pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res);
	if (IS_ERR(pcie_ep->mmio))
		return PTR_ERR(pcie_ep->mmio);

	syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
	if (!syscon) {
		dev_dbg(dev, "PERST separation not available\n");
		return 0;
	}

	pcie_ep->perst_map = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(pcie_ep->perst_map))
		return PTR_ERR(pcie_ep->perst_map);

	ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
					 1, &pcie_ep->perst_en);
	if (ret < 0) {
		dev_err(dev, "No Perst Enable offset in syscon\n");
		return ret;
	}

	ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
					 2, &pcie_ep->perst_sep_en);
	if (ret < 0) {
		dev_err(dev, "No Perst Separation Enable offset in syscon\n");
		return ret;
	}

	return 0;
}

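/* Acquire register regions, clocks, the core reset, GPIOs, the PHY and the interconnect path */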
static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
				      struct qcom_pcie_ep *pcie_ep)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
	if (ret) {
		dev_err(dev, "Failed to get io resources %d\n", ret);
		return ret;
	}

	pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks);
	if (pcie_ep->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return pcie_ep->num_clks;
	}

	pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
	if (IS_ERR(pcie_ep->core_reset))
		return PTR_ERR(pcie_ep->core_reset);

	pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie_ep->reset))
		return PTR_ERR(pcie_ep->reset);

	pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
	if (IS_ERR(pcie_ep->wake))
		return PTR_ERR(pcie_ep->wake);

	pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie_ep->phy))
		ret = PTR_ERR(pcie_ep->phy);

	pcie_ep->icc_mem = devm_of_icc_get(dev, "pcie-mem");
	if (IS_ERR(pcie_ep->icc_mem))
		ret = PTR_ERR(pcie_ep->icc_mem);

	return ret;
}

/* TODO: Notify clients about PCIe state change */
static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
{
	struct qcom_pcie_ep *pcie_ep = data;
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device *dev = pci->dev;
	u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
	u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
	u32 dstate, val;

	writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
	status &= mask;

	if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
		dev_dbg(dev, "Received Linkdown event\n");
		pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
		pci_epc_linkdown(pci->ep.epc);
	} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
		dev_dbg(dev, "Received BME event. Link is enabled!\n");
		pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
		qcom_pcie_ep_icc_update(pcie_ep);
		pci_epc_bme_notify(pci->ep.epc);
	} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
		dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
		val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
		val |= PARF_PM_CTRL_READY_ENTR_L23;
		writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
	} else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
		dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
					   DBI_CON_STATUS_POWER_STATE_MASK;
		dev_dbg(dev, "Received D%d state event\n", dstate);
		if (dstate == 3) {
			val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
			val |= PARF_PM_CTRL_REQ_EXIT_L1;
			writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
		}
	} else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
		dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
		dw_pcie_ep_linkup(&pci->ep);
		pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
	} else {
		dev_err(dev, "Received unknown event: %d\n", status);
	}

	return IRQ_HANDLED;
}

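/*
 * Threaded PERST# GPIO handler: tear down or bring up the link depending on
 * the PERST# level, then re-arm the IRQ trigger for the opposite level.
 */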
static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
{
	struct qcom_pcie_ep *pcie_ep = data;
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device *dev = pci->dev;
	u32 perst;

	perst = gpiod_get_value(pcie_ep->reset);
	if (perst) {
		dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
		qcom_pcie_perst_assert(pci);
	} else {
		dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
		qcom_pcie_perst_deassert(pci);
	}

	irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
			 (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));

	return IRQ_HANDLED;
}

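/* Request the controller "global" IRQ and the PERST# GPIO IRQ */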
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
					     struct qcom_pcie_ep *pcie_ep)
{
	int ret;

	pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
	if (pcie_ep->global_irq < 0)
		return pcie_ep->global_irq;

	ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
					qcom_pcie_ep_global_irq_thread,
					IRQF_ONESHOT,
					"global_irq", pcie_ep);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request Global IRQ\n");
		return ret;
	}

	pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
	irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
	ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
					qcom_pcie_ep_perst_irq_thread,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"perst_irq", pcie_ep);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
		disable_irq(pcie_ep->global_irq);
		return ret;
	}

	return 0;
}

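/* Raise INTx or MSI interrupts towards the host; other IRQ types are rejected */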
static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				  unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_IRQ_INTX:
		return dw_pcie_ep_raise_intx_irq(ep, func_no);
	case PCI_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EINVAL;
	}
}

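/* debugfs show callback: dump the link state transition counters */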
static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *)
				     dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
{
	struct dw_pcie *pci = &pcie_ep->pci;

	debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs,
				    qcom_pcie_ep_link_transition_count);
}

static const struct pci_epc_features qcom_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
	.align = SZ_4K,
};

static const struct pci_epc_features *
qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
{
	return &qcom_pcie_epc_features;
}

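/* Core init callback: clear all BARs so only those configured by the EPF driver are exposed */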
static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}

static const struct dw_pcie_ep_ops pci_ep_ops = {
	.init = qcom_pcie_ep_init,
	.raise_irq = qcom_pcie_ep_raise_irq,
	.get_features = qcom_pcie_epc_get_features,
};

static int qcom_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qcom_pcie_ep *pcie_ep;
	char *name;
	int ret;

	pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
	if (!pcie_ep)
		return -ENOMEM;

	pcie_ep->pci.dev = dev;
	pcie_ep->pci.ops = &pci_ops;
	pcie_ep->pci.ep.ops = &pci_ep_ops;
	pcie_ep->pci.edma.nr_irqs = 1;
	platform_set_drvdata(pdev, pcie_ep);

	ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
	if (ret)
		return ret;

	ret = qcom_pcie_enable_resources(pcie_ep);
	if (ret) {
		dev_err(dev, "Failed to enable resources: %d\n", ret);
		return ret;
	}

	ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
	if (ret) {
		dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
		goto err_disable_resources;
	}

	ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
	if (ret)
		goto err_disable_resources;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto err_disable_irqs;
	}

	pcie_ep->debugfs = debugfs_create_dir(name, NULL);
	qcom_pcie_ep_init_debugfs(pcie_ep);

	return 0;

err_disable_irqs:
	disable_irq(pcie_ep->global_irq);
	disable_irq(pcie_ep->perst_irq);

err_disable_resources:
	qcom_pcie_disable_resources(pcie_ep);

	return ret;
}

static void qcom_pcie_ep_remove(struct platform_device *pdev)
{
	struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);

	disable_irq(pcie_ep->global_irq);
	disable_irq(pcie_ep->perst_irq);

	debugfs_remove_recursive(pcie_ep->debugfs);

	if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
		return;

	qcom_pcie_disable_resources(pcie_ep);
}

static const struct of_device_id qcom_pcie_ep_match[] = {
	{ .compatible = "qcom,sdx55-pcie-ep", },
	{ .compatible = "qcom,sm8450-pcie-ep", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);

static struct platform_driver qcom_pcie_ep_driver = {
	.probe = qcom_pcie_ep_probe,
	.remove_new = qcom_pcie_ep_remove,
	.driver = {
		.name = "qcom-pcie-ep",
		.of_match_table = qcom_pcie_ep_match,
	},
};
builtin_platform_driver(qcom_pcie_ep_driver);

MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
MODULE_LICENSE("GPL v2");
898 | |