// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 4

static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
#define CQSPI_RD_NO_IRQ BIT(6)
#define CQSPI_DMA_SET_MASK BIT(7)
#define CQSPI_SUPPORT_DEVICE_RESET BIT(8)
#define CQSPI_DISABLE_STIG_MODE BIT(9)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
#define CQSPI_SUPPORTS_QUAD BIT(1)

#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
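
/*
 * Illustration (not used by the driver): CQSPI_OP_WIDTH maps a spi-mem
 * phase's bus width onto the 2-bit "instruction type" encoding used by
 * the RD/WR instruction registers. For an assumed 1-4-4 quad I/O read:
 *
 *   CQSPI_OP_WIDTH(op->cmd)  = ilog2(1) = 0  (single line)
 *   CQSPI_OP_WIDTH(op->addr) = ilog2(4) = 2  (quad)
 *   CQSPI_OP_WIDTH(op->data) = ilog2(4) = 2  (quad)
 *
 * A phase with zero bytes always encodes as 0.
 */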

enum {
	CLK_QSPI_APB = 0,
	CLK_QSPI_AHB,
	CLK_QSPI_NUM,
};

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st *cqspi;
	u32 clk_rate;
	u32 read_delay;
	u32 tshsl_ns;
	u32 tsd2d_ns;
	u32 tchsh_ns;
	u32 tslch_ns;
	u8 cs;
};

struct cqspi_st {
	struct platform_device *pdev;
	struct spi_controller *host;
	struct clk *clk;
	struct clk *clks[CLK_QSPI_NUM];
	unsigned int sclk;

	void __iomem *iobase;
	void __iomem *ahb_base;
	resource_size_t ahb_size;
	struct completion transfer_complete;

	struct dma_chan *rx_chan;
	struct completion rx_dma_complete;
	dma_addr_t mmap_phys_base;

	int current_cs;
	unsigned long master_ref_clk_hz;
	bool is_decoded_cs;
	u32 fifo_depth;
	u32 fifo_width;
	u32 num_chipselect;
	bool rclk_en;
	u32 trigger_address;
	u32 wr_delay;
	bool use_direct_mode;
	bool use_direct_mode_wr;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool use_dma_read;
	u32 pd_dev_id;
	bool wr_completion;
	bool slow_sram;
	bool apb_ahb_hazard;

	bool is_jh7110; /* Flag for StarFive JH7110 SoC */
	bool disable_stig_mode;

	const struct cqspi_driver_platdata *ddata;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u16 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
	int (*jh7110_clk_init)(struct platform_device *pdev,
			       struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
#define CQSPI_BUSYWAIT_TIMEOUT_US 500

/* Runtime PM autosuspend delay */
#define CQSPI_AUTOSUSPEND_TIMEOUT 2000

#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
#define CQSPI_DUMMY_CLKS_MAX 31

#define CQSPI_STIG_DATA_LEN_MAX 8

/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5)
#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6)

#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F

#define CQSPI_REG_WR_INSTR 0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16

#define CQSPI_REG_DELAY 0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB 0
#define CQSPI_REG_DELAY_TCHSH_LSB 8
#define CQSPI_REG_DELAY_TSD2D_LSB 16
#define CQSPI_REG_DELAY_TSHSL_LSB 24
#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF

#define CQSPI_REG_READCAPTURE 0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF

#define CQSPI_REG_SIZE 0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB 0
#define CQSPI_REG_SIZE_PAGE_LSB 4
#define CQSPI_REG_SIZE_BLOCK_LSB 16
#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F

#define CQSPI_REG_SRAMPARTITION 0x18
#define CQSPI_REG_INDIRECTTRIGGER 0x1C

#define CQSPI_REG_DMA 0x20
#define CQSPI_REG_DMA_SINGLE_LSB 0
#define CQSPI_REG_DMA_BURST_LSB 8
#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
#define CQSPI_REG_DMA_BURST_MASK 0xFF

#define CQSPI_REG_REMAP 0x24
#define CQSPI_REG_MODE_BIT 0x28

#define CQSPI_REG_SDRAMLEVEL 0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)

#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44

#define CQSPI_REG_INDIRECTRD 0x60
#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
#define CQSPI_REG_INDIRECTRDBYTES 0x6C

#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F

#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80

#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC

#define CQSPI_REG_POLLING_STATUS 0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16

#define CQSPI_REG_OP_EXT_LOWER 0xE0
#define CQSPI_REG_OP_EXT_READ_LSB 24
#define CQSPI_REG_OP_EXT_WRITE_LSB 16
#define CQSPI_REG_OP_EXT_STIG_LSB 0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
#define CQSPI_REG_IRQ_IND_COMP BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
#define CQSPI_REG_IRQ_WATERMARK BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)

#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
			   CQSPI_REG_IRQ_IND_SRAM_FULL | \
			   CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
			   CQSPI_REG_IRQ_WATERMARK | \
			   CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
#define CQSPI_DMA_UNALIGN 0x3

#define CQSPI_REG_VERSAL_DMA_VAL 0x602

static int cqspi_wait_for_bit(const struct cqspi_driver_platdata *ddata,
			      void __iomem *reg, const u32 mask, bool clr,
			      bool busywait)
{
	u64 timeout_us = CQSPI_TIMEOUT_MS * USEC_PER_MSEC;
	u32 val;

	if (busywait) {
		int ret = readl_relaxed_poll_timeout(reg, val,
						     (((clr ? ~val : val) & mask) == mask),
						     0, CQSPI_BUSYWAIT_TIMEOUT_US);

		if (ret != -ETIMEDOUT)
			return ret;

		timeout_us -= CQSPI_BUSYWAIT_TIMEOUT_US;
	}

	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, timeout_us);
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	unsigned int irq_status;

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
		if (ddata->get_dma_status(cqspi)) {
			complete(&cqspi->transfer_complete);
			return IRQ_HANDLED;
		}
	} else if (!cqspi->slow_sram) {
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
	} else {
		irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
	}

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
	u32 rdreg = 0;

	rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}
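
/*
 * Sketch of the resulting layout (values assumed for illustration): for a
 * 1-1-4 quad-output read, cqspi_calc_rdreg() returns 0 at bit 8 (single-line
 * command), 0 at bit 12 (single-line address) and 2 at bit 16 (quad data),
 * matching the CQSPI_REG_RD_INSTR_TYPE_* field positions defined above.
 */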

static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (op->cmd.dtr)
		dummy_clk /= 2;

	return dummy_clk;
}
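
/*
 * Worked example (illustrative numbers): one dummy byte on a single-line
 * bus gives 1 * (8 / 1) = 8 dummy clocks, while the same byte count on an
 * octal bus gives 1 * (8 / 8) = 1 clock; DTR commands then halve the
 * result because both clock edges carry data.
 */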

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Poll for completion. */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1, true);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Poll QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}
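
/*
 * Example (opcode value assumed for illustration): for a two-byte DTR
 * command such as 0xED11, the extension byte 0x11 (the LSB) is what gets
 * programmed here into CQSPI_REG_OP_EXT_LOWER at the caller's shift, while
 * the primary opcode 0xED (op->cmd.opcode >> 8) goes into the instruction
 * register in the callers below.
 */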

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable dual byte opcode here. The callers have to set up the
	 * extension opcode based on which type of operation it is.
	 */
	if (op->cmd.dtr) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		unsigned int mask = CQSPI_REG_CONFIG_DTR_PROTO | CQSPI_REG_CONFIG_DUAL_OPCODE;
		/* Shortcut if DTR is already disabled. */
		if ((reg & mask) == 0)
			return 0;
		reg &= ~mask;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(op);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);

	/* setup ADDR BIT field */
	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	/* Reset CMD_CTRL Reg once command read completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return 0;
}

static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	ret = cqspi_exec_flash_cmd(cqspi, reg);

	/* Reset CMD_CTRL Reg once command write completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return ret;
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(op);

	/* Setup dummy clock cycles */
	dummy_clk = cqspi_calc_dummy(op);

	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
		       << CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	bool use_irq = !(cqspi->ddata && cqspi->ddata->quirks & CQSPI_RD_NO_IRQ);
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/*
	 * On the SoCFPGA platform, reading the SRAM is slow due to a
	 * hardware limitation and causes a read-interrupt storm on the CPU,
	 * so enable only the watermark interrupt here and disable all read
	 * interrupts later: we want the "bytes to read" loop to run with
	 * every read interrupt off for maximum performance.
	 */

	if (use_irq && cqspi->slow_sram)
		writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
	else if (use_irq)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
	else
		writel(0, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (use_irq &&
		    !wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		/*
		 * Disable all read interrupts until
		 * we are out of "bytes to read"
		 */
		if (cqspi->slow_sram)
			writel(0x0, reg_base + CQSPI_REG_IRQMASK);

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (use_irq && remaining > 0) {
			reinit_completion(&cqspi->transfer_complete);
			if (cqspi->slow_sram)
				writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
		}
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0, true);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

static void cqspi_device_reset(struct cqspi_st *cqspi)
{
	u32 reg;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	/*
	 * NOTE: Delay timing implementation is derived from
	 * spi_nor_hw_reset()
	 */
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1, 5);
	writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(100, 150);
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1000, 1200);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	reinit_completion(&cqspi->transfer_complete);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(max_t(size_t, bytes_to_dma, 500)))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support an address phase in the Read
	 * SR command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		/*
		 * DAC mode requires auto polling, as the flash needs to be
		 * polled for write completion in case of a bubble in the SPI
		 * transaction due to a slow CPU/DMA master.
		 */
		cqspi->use_direct_mode_wr = false;
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per the 66AK2G02 TRM SPRUHY8F section 11.15.5.3, "Indirect
	 * Access Controller programming sequence", a couple of QSPI_REF_CLK
	 * cycles of delay are required for the above bit to be internally
	 * synchronized by the QSPI module. Provide 5 cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	/*
	 * If a hazard exists between the APB and AHB interfaces, perform a
	 * dummy readback from the controller to ensure synchronization.
	 */
	if (cqspi->apb_ahb_hazard)
		readl(reg_base + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0, false);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}
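
/*
 * Worked example (illustrative numbers): with ref_clk_hz = 500000000
 * (500 MHz) and ns_val = 50, ticks = 500000 (kHz), and
 * DIV_ROUND_UP(500000 * 50, 1000000) = 25 reference-clock ticks.
 */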

static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	/* Maximum baud divisor */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK) {
		div = CQSPI_REG_CONFIG_BAUD_MASK;
		dev_warn(&cqspi->pdev->dev,
			 "Unable to adjust clock <= %d hz. Reduced to %d hz\n",
			 cqspi->sclk, ref_clk_hz / ((div + 1) * 2));
	}

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}
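
/*
 * Example (illustrative numbers): the divisor field encodes
 * sclk = ref_clk / (2 * (div + 1)), so for a 500 MHz reference and a
 * requested 25 MHz sclk, div = DIV_ROUND_UP(500000000, 2 * 25000000) - 1
 * = 9, giving exactly 500 MHz / 20 = 25 MHz on SCLK.
 */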

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we can not use direct mode when in DTR mode for writing
	 * data.
	 */
	if (!op->cmd.dtr && cqspi->use_direct_mode &&
	    cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}

static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
	cqspi_configure(f_pdata, op->max_freq);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		/*
		 * Reads in DAC mode are forced to fetch a minimum of 4
		 * bytes, which some flash devices do not support during
		 * register reads; prefer STIG mode for such small reads.
		 */
		if (!op->addr.nbytes ||
		    (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX &&
		     !cqspi->disable_stig_mode))
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;

	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
		return ret;
	}

	ret = cqspi_mem_process(mem, op);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of the op phase having zero nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	if (all_true) {
		/* Right now we only support 8-8-8 DTR mode. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;
	} else if (!all_false) {
		/* Mixed DTR modes are not supported. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		/* Zero signals FIFO depth should be runtime detected. */
		cqspi->fifo_depth = 0;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}

static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}
}
1625 | |
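/*
 * Probe the SRAM partition register to discover the FIFO depth: only the
 * bits that index the FIFO are writable, so writing all-ones and reading
 * back reveals the depth. Used to validate (or stand in for) the
 * devicetree "cdns,fifo-depth" value.
 */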
1626 | static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi) |
1627 | { |
1628 | struct device *dev = &cqspi->pdev->dev; |
1629 | u32 reg, fifo_depth; |
1630 | |
1631 | /* |
1632 | * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N |
1633 | * the FIFO depth. |
1634 | */ |
	writel(U32_MAX, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
	reg = readl(cqspi->iobase + CQSPI_REG_SRAMPARTITION);
1637 | fifo_depth = reg + 1; |
1638 | |
1639 | /* FIFO depth of zero means no value from devicetree was provided. */ |
1640 | if (cqspi->fifo_depth == 0) { |
1641 | cqspi->fifo_depth = fifo_depth; |
1642 | dev_dbg(dev, "using FIFO depth of %u\n" , fifo_depth); |
1643 | } else if (fifo_depth != cqspi->fifo_depth) { |
1644 | dev_warn(dev, "detected FIFO depth (%u) different from config (%u)\n" , |
1645 | fifo_depth, cqspi->fifo_depth); |
1646 | } |
1647 | } |
1648 | |
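/*
 * Direct-mode reads from the memory-mapped window can be offloaded to any
 * memcpy-capable dmaengine channel. The channel is optional: when none is
 * available the driver falls back to CPU copies.
 */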
1649 | static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) |
1650 | { |
1651 | dma_cap_mask_t mask; |
1652 | |
1653 | dma_cap_zero(mask); |
1654 | dma_cap_set(DMA_MEMCPY, mask); |
1655 | |
	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		if (ret == -ENODEV) {
			/* DMA support is not mandatory */
			dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
			return 0;
		}

		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);
1670 | |
1671 | return 0; |
1672 | } |
1673 | |
1674 | static const char *cqspi_get_name(struct spi_mem *mem) |
1675 | { |
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      spi_get_chipselect(mem->spi, 0));
1681 | } |
1682 | |
1683 | static const struct spi_controller_mem_ops cqspi_mem_ops = { |
1684 | .exec_op = cqspi_exec_mem_op, |
1685 | .get_name = cqspi_get_name, |
1686 | .supports_op = cqspi_supports_mem_op, |
1687 | }; |
1688 | |
1689 | static const struct spi_controller_mem_caps cqspi_mem_caps = { |
1690 | .dtr = true, |
1691 | .per_op_freq = true, |
1692 | }; |
1693 | |
1694 | static int cqspi_setup_flash(struct cqspi_st *cqspi) |
1695 | { |
1696 | unsigned int max_cs = cqspi->num_chipselect - 1; |
1697 | struct platform_device *pdev = cqspi->pdev; |
1698 | struct device *dev = &pdev->dev; |
1699 | struct cqspi_flash_pdata *f_pdata; |
1700 | unsigned int cs; |
1701 | int ret; |
1702 | |
1703 | /* Get flash device data */ |
1704 | for_each_available_child_of_node_scoped(dev->of_node, np) { |
1705 | ret = of_property_read_u32(np, propname: "reg" , out_value: &cs); |
1706 | if (ret) { |
1707 | dev_err(dev, "Couldn't determine chip select.\n" ); |
1708 | return ret; |
1709 | } |
1710 | |
1711 | if (cs >= cqspi->num_chipselect) { |
1712 | dev_err(dev, "Chip select %d out of range.\n" , cs); |
1713 | return -EINVAL; |
1714 | } else if (cs < max_cs) { |
1715 | max_cs = cs; |
1716 | } |
1717 | |
1718 | f_pdata = &cqspi->f_pdata[cs]; |
1719 | f_pdata->cqspi = cqspi; |
1720 | f_pdata->cs = cs; |
1721 | |
1722 | ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); |
1723 | if (ret) |
1724 | return ret; |
1725 | } |
1726 | |
1727 | cqspi->num_chipselect = max_cs + 1; |
1728 | return 0; |
1729 | } |
1730 | |
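/*
 * The StarFive JH7110 gates the controller's APB and AHB bus clocks
 * separately from the reference clock, so both must be enabled before the
 * core is touched and released again on the error and removal paths.
 */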
1731 | static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi) |
1732 | { |
1733 | static struct clk_bulk_data qspiclk[] = { |
1734 | { .id = "apb" }, |
1735 | { .id = "ahb" }, |
1736 | }; |
1737 | |
	int ret;

	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
		return ret;
	}
1745 | |
1746 | cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk; |
1747 | cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk; |
1748 | |
	ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
		return ret;
	}

	ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
		goto disable_apb_clk;
	}
1760 | |
1761 | cqspi->is_jh7110 = true; |
1762 | |
1763 | return 0; |
1764 | |
1765 | disable_apb_clk: |
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
1767 | |
1768 | return ret; |
1769 | } |
1770 | |
1771 | static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi) |
1772 | { |
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
}

static int cqspi_probe(struct platform_device *pdev)
1777 | { |
1778 | const struct cqspi_driver_platdata *ddata; |
1779 | struct reset_control *rstc, *rstc_ocp, *rstc_ref; |
1780 | struct device *dev = &pdev->dev; |
1781 | struct spi_controller *host; |
1782 | struct resource *res_ahb; |
1783 | struct cqspi_st *cqspi; |
1784 | int ret; |
1785 | int irq; |
1786 | |
	host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
1788 | if (!host) |
1789 | return -ENOMEM; |
1790 | |
1791 | host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL; |
1792 | host->mem_ops = &cqspi_mem_ops; |
1793 | host->mem_caps = &cqspi_mem_caps; |
1794 | host->dev.of_node = pdev->dev.of_node; |
1795 | |
	cqspi = spi_controller_get_devdata(host);
1797 | |
1798 | cqspi->pdev = pdev; |
1799 | cqspi->host = host; |
1800 | cqspi->is_jh7110 = false; |
	cqspi->ddata = ddata = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, cqspi);
1803 | |
1804 | /* Obtain configuration from OF. */ |
1805 | ret = cqspi_of_get_pdata(cqspi); |
1806 | if (ret) { |
1807 | dev_err(dev, "Cannot get mandatory OF data.\n" ); |
1808 | return -ENODEV; |
1809 | } |
1810 | |
1811 | /* Obtain QSPI clock. */ |
1812 | cqspi->clk = devm_clk_get(dev, NULL); |
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		ret = PTR_ERR(cqspi->clk);
1816 | return ret; |
1817 | } |
1818 | |
1819 | /* Obtain and remap controller address. */ |
	cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
1824 | return ret; |
1825 | } |
1826 | |
1827 | /* Obtain and remap AHB address. */ |
	cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		return ret;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);
1836 | |
	init_completion(&cqspi->transfer_complete);
1838 | |
1839 | /* Obtain IRQ line. */ |
1840 | irq = platform_get_irq(pdev, 0); |
1841 | if (irq < 0) |
1842 | return -ENXIO; |
1843 | |
1844 | ret = pm_runtime_set_active(dev); |
1845 | if (ret) |
1846 | return ret; |

	ret = clk_prepare_enable(cqspi->clk);
1850 | if (ret) { |
1851 | dev_err(dev, "Cannot enable QSPI clock.\n" ); |
1852 | goto probe_clk_failed; |
1853 | } |
1854 | |
1855 | /* Obtain QSPI reset control */ |
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
1860 | goto probe_reset_failed; |
1861 | } |
1862 | |
	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
1867 | goto probe_reset_failed; |
1868 | } |
1869 | |
	if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) {
		rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
		if (IS_ERR(rstc_ref)) {
			ret = PTR_ERR(rstc_ref);
			dev_err(dev, "Cannot get QSPI REF reset.\n");
			goto probe_reset_failed;
		}
		reset_control_assert(rstc_ref);
		reset_control_deassert(rstc_ref);
1879 | } |
1880 | |
1881 | reset_control_assert(rstc); |
1882 | reset_control_deassert(rstc); |
1883 | |
	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);
1886 | |
	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
1888 | host->max_speed_hz = cqspi->master_ref_clk_hz; |
1889 | |
1890 | /* write completion is supported by default */ |
1891 | cqspi->wr_completion = true; |
1892 | |
1893 | if (ddata) { |
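		/*
		 * Some hosts (e.g. TI K2G) need a settling delay after
		 * starting an indirect write; size it as 50 reference-clock
		 * periods, converted to nanoseconds.
		 */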
1894 | if (ddata->quirks & CQSPI_NEEDS_WR_DELAY) |
1895 | cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC, |
1896 | cqspi->master_ref_clk_hz); |
1897 | if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL) |
1898 | host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL; |
1899 | if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD) |
1900 | host->mode_bits |= SPI_TX_QUAD; |
1901 | if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) { |
1902 | cqspi->use_direct_mode = true; |
1903 | cqspi->use_direct_mode_wr = true; |
1904 | } |
1905 | if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA) |
1906 | cqspi->use_dma_read = true; |
1907 | if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION) |
1908 | cqspi->wr_completion = false; |
1909 | if (ddata->quirks & CQSPI_SLOW_SRAM) |
1910 | cqspi->slow_sram = true; |
1911 | if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR) |
1912 | cqspi->apb_ahb_hazard = true; |
1913 | |
1914 | if (ddata->jh7110_clk_init) { |
1915 | ret = cqspi_jh7110_clk_init(pdev, cqspi); |
1916 | if (ret) |
1917 | goto probe_reset_failed; |
1918 | } |
1919 | if (ddata->quirks & CQSPI_DISABLE_STIG_MODE) |
1920 | cqspi->disable_stig_mode = true; |
1921 | |
1922 | if (ddata->quirks & CQSPI_DMA_SET_MASK) { |
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1924 | if (ret) |
1925 | goto probe_reset_failed; |
1926 | } |
1927 | } |
1928 | |
	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
1933 | goto probe_reset_failed; |
1934 | } |
1935 | |
1936 | cqspi_wait_idle(cqspi); |
	cqspi_controller_enable(cqspi, 0);
1938 | cqspi_controller_detect_fifo_depth(cqspi); |
1939 | cqspi_controller_init(cqspi); |
	cqspi_controller_enable(cqspi, 1);
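	/* Invalidate the cached chip-select and sclk so the first op reprograms them. */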
1941 | cqspi->current_cs = -1; |
1942 | cqspi->sclk = 0; |
1943 | |
1944 | ret = cqspi_setup_flash(cqspi); |
1945 | if (ret) { |
1946 | dev_err(dev, "failed to setup flash parameters %d\n" , ret); |
1947 | goto probe_setup_failed; |
1948 | } |
1949 | |
1950 | host->num_chipselect = cqspi->num_chipselect; |
1951 | |
1952 | if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)) |
1953 | cqspi_device_reset(cqspi); |
1954 | |
1955 | if (cqspi->use_direct_mode) { |
1956 | ret = cqspi_request_mmap_dma(cqspi); |
1957 | if (ret == -EPROBE_DEFER) |
1958 | goto probe_setup_failed; |
1959 | } |
1960 | |
1961 | ret = devm_pm_runtime_enable(dev); |
1962 | if (ret) { |
1963 | if (cqspi->rx_chan) |
			dma_release_channel(cqspi->rx_chan);
1965 | goto probe_setup_failed; |
1966 | } |
1967 | |
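	/*
	 * Keep the device powered while the controller registers: the
	 * usage count taken here pairs with the autosuspend put after
	 * spi_register_controller() succeeds.
	 */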
1968 | pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT); |
1969 | pm_runtime_use_autosuspend(dev); |
1970 | pm_runtime_get_noresume(dev); |
1971 | |
	ret = spi_register_controller(host);
1973 | if (ret) { |
1974 | dev_err(&pdev->dev, "failed to register SPI ctlr %d\n" , ret); |
1975 | goto probe_setup_failed; |
1976 | } |
1977 | |
1978 | pm_runtime_mark_last_busy(dev); |
1979 | pm_runtime_put_autosuspend(dev); |
1980 | |
1981 | return 0; |
1982 | probe_setup_failed: |
	cqspi_controller_enable(cqspi, 0);
1984 | probe_reset_failed: |
1985 | if (cqspi->is_jh7110) |
1986 | cqspi_jh7110_disable_clk(pdev, cqspi); |
	clk_disable_unprepare(cqspi->clk);
1988 | probe_clk_failed: |
1989 | return ret; |
1990 | } |
1991 | |
1992 | static void cqspi_remove(struct platform_device *pdev) |
1993 | { |
1994 | struct cqspi_st *cqspi = platform_get_drvdata(pdev); |
1995 | |
	spi_unregister_controller(cqspi->host);
	cqspi_controller_enable(cqspi, 0);
1998 | |
1999 | if (cqspi->rx_chan) |
		dma_release_channel(cqspi->rx_chan);
2001 | |
	clk_disable_unprepare(cqspi->clk);
2003 | |
2004 | if (cqspi->is_jh7110) |
2005 | cqspi_jh7110_disable_clk(pdev, cqspi); |
2006 | |
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
2009 | } |
2010 | |
2011 | static int cqspi_runtime_suspend(struct device *dev) |
2012 | { |
2013 | struct cqspi_st *cqspi = dev_get_drvdata(dev); |
2014 | |
	cqspi_controller_enable(cqspi, 0);
	clk_disable_unprepare(cqspi->clk);
2017 | return 0; |
2018 | } |
2019 | |
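/*
 * Runtime resume re-runs the full init sequence: the controller may have
 * lost its register state while its clock was gated.
 */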
2020 | static int cqspi_runtime_resume(struct device *dev) |
2021 | { |
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(cqspi->clk);
	if (ret)
		return ret;

	cqspi_wait_idle(cqspi);
	cqspi_controller_enable(cqspi, 0);
	cqspi_controller_init(cqspi);
	cqspi_controller_enable(cqspi, 1);
2029 | |
2030 | cqspi->current_cs = -1; |
2031 | cqspi->sclk = 0; |
2032 | return 0; |
2033 | } |
2034 | |
2035 | static int cqspi_suspend(struct device *dev) |
2036 | { |
2037 | struct cqspi_st *cqspi = dev_get_drvdata(dev); |
2038 | int ret; |
2039 | |
	ret = spi_controller_suspend(cqspi->host);
2041 | if (ret) |
2042 | return ret; |
2043 | |
2044 | return pm_runtime_force_suspend(dev); |
2045 | } |
2046 | |
2047 | static int cqspi_resume(struct device *dev) |
2048 | { |
2049 | struct cqspi_st *cqspi = dev_get_drvdata(dev); |
2050 | int ret; |
2051 | |
2052 | ret = pm_runtime_force_resume(dev); |
2053 | if (ret) { |
2054 | dev_err(dev, "pm_runtime_force_resume failed on resume\n" ); |
2055 | return ret; |
2056 | } |
2057 | |
	return spi_controller_resume(cqspi->host);
2059 | } |
2060 | |
2061 | static const struct dev_pm_ops cqspi_dev_pm_ops = { |
2062 | RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL) |
2063 | SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume) |
2064 | }; |
2065 | |
2066 | static const struct cqspi_driver_platdata cdns_qspi = { |
2067 | .quirks = CQSPI_DISABLE_DAC_MODE, |
2068 | }; |
2069 | |
2070 | static const struct cqspi_driver_platdata k2g_qspi = { |
2071 | .quirks = CQSPI_NEEDS_WR_DELAY, |
2072 | }; |
2073 | |
2074 | static const struct cqspi_driver_platdata am654_ospi = { |
2075 | .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD, |
2076 | .quirks = CQSPI_NEEDS_WR_DELAY, |
2077 | }; |
2078 | |
2079 | static const struct cqspi_driver_platdata intel_lgm_qspi = { |
2080 | .quirks = CQSPI_DISABLE_DAC_MODE, |
2081 | }; |
2082 | |
2083 | static const struct cqspi_driver_platdata socfpga_qspi = { |
2084 | .quirks = CQSPI_DISABLE_DAC_MODE |
2085 | | CQSPI_NO_SUPPORT_WR_COMPLETION |
2086 | | CQSPI_SLOW_SRAM |
2087 | | CQSPI_DISABLE_STIG_MODE, |
2088 | }; |
2089 | |
2090 | static const struct cqspi_driver_platdata versal_ospi = { |
2091 | .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, |
2092 | .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA |
2093 | | CQSPI_DMA_SET_MASK, |
2094 | .indirect_read_dma = cqspi_versal_indirect_read_dma, |
2095 | .get_dma_status = cqspi_get_versal_dma_status, |
2096 | }; |
2097 | |
2098 | static const struct cqspi_driver_platdata versal2_ospi = { |
2099 | .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, |
2100 | .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA |
2101 | | CQSPI_DMA_SET_MASK |
2102 | | CQSPI_SUPPORT_DEVICE_RESET, |
2103 | .indirect_read_dma = cqspi_versal_indirect_read_dma, |
2104 | .get_dma_status = cqspi_get_versal_dma_status, |
2105 | }; |
2106 | |
2107 | static const struct cqspi_driver_platdata jh7110_qspi = { |
2108 | .quirks = CQSPI_DISABLE_DAC_MODE, |
2109 | .jh7110_clk_init = cqspi_jh7110_clk_init, |
2110 | }; |
2111 | |
2112 | static const struct cqspi_driver_platdata pensando_cdns_qspi = { |
2113 | .quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE, |
2114 | }; |
2115 | |
2116 | static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = { |
2117 | .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, |
2118 | .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | |
2119 | CQSPI_RD_NO_IRQ, |
2120 | }; |
2121 | |
2122 | static const struct of_device_id cqspi_dt_ids[] = { |
2123 | { |
2124 | .compatible = "cdns,qspi-nor" , |
2125 | .data = &cdns_qspi, |
2126 | }, |
2127 | { |
2128 | .compatible = "ti,k2g-qspi" , |
2129 | .data = &k2g_qspi, |
2130 | }, |
2131 | { |
2132 | .compatible = "ti,am654-ospi" , |
2133 | .data = &am654_ospi, |
2134 | }, |
2135 | { |
2136 | .compatible = "intel,lgm-qspi" , |
2137 | .data = &intel_lgm_qspi, |
2138 | }, |
2139 | { |
2140 | .compatible = "xlnx,versal-ospi-1.0" , |
2141 | .data = &versal_ospi, |
2142 | }, |
2143 | { |
2144 | .compatible = "intel,socfpga-qspi" , |
2145 | .data = &socfpga_qspi, |
2146 | }, |
2147 | { |
2148 | .compatible = "starfive,jh7110-qspi" , |
2149 | .data = &jh7110_qspi, |
2150 | }, |
2151 | { |
2152 | .compatible = "amd,pensando-elba-qspi" , |
2153 | .data = &pensando_cdns_qspi, |
2154 | }, |
2155 | { |
2156 | .compatible = "mobileye,eyeq5-ospi" , |
2157 | .data = &mobileye_eyeq5_ospi, |
2158 | }, |
2159 | { |
2160 | .compatible = "amd,versal2-ospi" , |
2161 | .data = &versal2_ospi, |
2162 | }, |
2163 | { /* end of table */ } |
2164 | }; |
2165 | |
2166 | MODULE_DEVICE_TABLE(of, cqspi_dt_ids); |
2167 | |
2168 | static struct platform_driver cqspi_platform_driver = { |
2169 | .probe = cqspi_probe, |
2170 | .remove = cqspi_remove, |
2171 | .driver = { |
2172 | .name = CQSPI_NAME, |
2173 | .pm = pm_ptr(&cqspi_dev_pm_ops), |
2174 | .of_match_table = cqspi_dt_ids, |
2175 | }, |
2176 | }; |
2177 | |
2178 | module_platform_driver(cqspi_platform_driver); |
2179 | |
2180 | MODULE_DESCRIPTION("Cadence QSPI Controller Driver" ); |
2181 | MODULE_LICENSE("GPL v2" ); |
2182 | MODULE_ALIAS("platform:" CQSPI_NAME); |
2183 | MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>" ); |
2184 | MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>" ); |
2185 | MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>" ); |
2186 | MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>" ); |
2187 | MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>" ); |
2188 | |