// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define QSPI_NUM_CS		2
#define QSPI_BYTES_PER_WORD	4

#define MSTR_CONFIG		0x0000
#define FULL_CYCLE_MODE		BIT(3)
#define FB_CLK_EN		BIT(4)
#define PIN_HOLDN		BIT(6)
#define PIN_WPN			BIT(7)
#define DMA_ENABLE		BIT(8)
#define BIG_ENDIAN_MODE		BIT(9)
#define SPI_MODE_MSK		0xc00
#define SPI_MODE_SHFT		10
#define CHIP_SELECT_NUM		BIT(12)
#define SBL_EN			BIT(13)
#define LPA_BASE_MSK		0x3c000
#define LPA_BASE_SHFT		14
#define TX_DATA_DELAY_MSK	0xc0000
#define TX_DATA_DELAY_SHFT	18
#define TX_CLK_DELAY_MSK	0x300000
#define TX_CLK_DELAY_SHFT	20
#define TX_CS_N_DELAY_MSK	0xc00000
#define TX_CS_N_DELAY_SHFT	22
#define TX_DATA_OE_DELAY_MSK	0x3000000
#define TX_DATA_OE_DELAY_SHFT	24
#define AHB_MASTER_CFG				0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK		0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT		0
#define HMEM_TYPE_LAST_TRANS_MSK		0x38
#define HMEM_TYPE_LAST_TRANS_SHFT		3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK	0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT	6
#define HMEMTYPE_READ_TRANS_MSK			0x700
#define HMEMTYPE_READ_TRANS_SHFT		8
#define HSHARED					BIT(11)
#define HINNERSHARED				BIT(12)

#define MSTR_INT_EN		0x000C
#define MSTR_INT_STATUS		0x0010
#define RESP_FIFO_UNDERRUN	BIT(0)
#define RESP_FIFO_NOT_EMPTY	BIT(1)
#define RESP_FIFO_RDY		BIT(2)
#define HRESP_FROM_NOC_ERR	BIT(3)
#define WR_FIFO_EMPTY		BIT(9)
#define WR_FIFO_FULL		BIT(10)
#define WR_FIFO_OVERRUN		BIT(11)
#define TRANSACTION_DONE	BIT(16)
#define DMA_CHAIN_DONE		BIT(31)
#define QSPI_ERR_IRQS		(RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
				 WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS		(QSPI_ERR_IRQS | RESP_FIFO_RDY | \
				 WR_FIFO_EMPTY | WR_FIFO_FULL | \
				 TRANSACTION_DONE | DMA_CHAIN_DONE)

#define PIO_XFER_CTRL		0x0014
#define REQUEST_COUNT_MSK	0xffff

#define PIO_XFER_CFG		0x0018
#define TRANSFER_DIRECTION	BIT(0)
#define MULTI_IO_MODE_MSK	0xe
#define MULTI_IO_MODE_SHFT	1
#define TRANSFER_FRAGMENT	BIT(8)
#define SDR_1BIT		1
#define SDR_2BIT		2
#define SDR_4BIT		3
#define DDR_1BIT		5
#define DDR_2BIT		6
#define DDR_4BIT		7
#define DMA_DESC_SINGLE_SPI	1
#define DMA_DESC_DUAL_SPI	2
#define DMA_DESC_QUAD_SPI	3

#define PIO_XFER_STATUS		0x001c
#define WR_FIFO_BYTES_MSK	0xffff0000
#define WR_FIFO_BYTES_SHFT	16

#define PIO_DATAOUT_1B		0x0020
#define PIO_DATAOUT_4B		0x0024

#define RD_FIFO_CFG		0x0028
#define CONTINUOUS_MODE		BIT(0)

#define RD_FIFO_STATUS		0x002c
#define FIFO_EMPTY		BIT(11)
#define WR_CNTS_MSK		0x7f0
#define WR_CNTS_SHFT		4
#define RDY_64BYTE		BIT(3)
#define RDY_32BYTE		BIT(2)
#define RDY_16BYTE		BIT(1)
#define FIFO_RDY		BIT(0)

#define RD_FIFO_RESET		0x0030
#define RESET_FIFO		BIT(0)

#define NEXT_DMA_DESC_ADDR	0x0040
#define CURRENT_DMA_DESC_ADDR	0x0044
#define CURRENT_MEM_ADDR	0x0048
#define HW_VERSION		0x004c
#define RD_FIFO			0x0050
#define SAMPLING_CLK_CFG	0x0090
#define SAMPLING_CLK_STATUS	0x0094

#define QSPI_ALIGN_REQ		32

enum qspi_dir {
	QSPI_READ,
	QSPI_WRITE,
};

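/*
 * In-memory layout of a single hardware DMA command descriptor. The
 * controller walks a chain of these starting from NEXT_DMA_DESC_ADDR;
 * each descriptor points at a data buffer and, optionally, at the next
 * descriptor in the chain.
 */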
struct qspi_cmd_desc {
	u32 data_address;
	u32 next_descriptor;
	u32 direction:1;
	u32 multi_io_mode:3;
	u32 reserved1:4;
	u32 fragment:1;
	u32 reserved2:7;
	u32 length:16;
};

struct qspi_xfer {
	union {
		const void *tx_buf;
		void *rx_buf;
	};
	unsigned int rem_bytes;
	unsigned int buswidth;
	enum qspi_dir dir;
	bool is_last;
};

enum qspi_clocks {
	QSPI_CLK_CORE,
	QSPI_CLK_IFACE,
	QSPI_NUM_CLKS
};

/*
 * Number of entries in the sgt returned from the spi framework that
 * will be supported. Can be modified as required.
 * In practice, given max_dma_len is 64KB, the number of
 * entries is not expected to exceed 1.
 */
#define QSPI_MAX_SG 5

struct qcom_qspi {
	void __iomem *base;
	struct device *dev;
	struct clk_bulk_data *clks;
	struct qspi_xfer xfer;
	struct dma_pool *dma_cmd_pool;
	dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
	void *virt_cmd_desc[QSPI_MAX_SG];
	unsigned int n_cmd_desc;
	struct icc_path *icc_path_cpu_to_qspi;
	unsigned long last_speed;
	/* Lock to protect data accessed by IRQs */
	spinlock_t lock;
};

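/* Map an SPI bus width (1/2/4 lines) to the controller's SDR IO-mode encoding */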
static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
				   unsigned int buswidth)
{
	switch (buswidth) {
	case 1:
		return SDR_1BIT;
	case 2:
		return SDR_2BIT;
	case 4:
		return SDR_4BIT;
	default:
		dev_warn_once(ctrl->dev,
				"Unexpected bus width: %u\n", buswidth);
		return SDR_1BIT;
	}
}

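/*
 * Program PIO_XFER_CFG for the current transfer: direction, whether more
 * transfers follow (TRANSFER_FRAGMENT) and the multi-IO mode.
 */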
static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_cfg;
	u32 iomode;
	const struct qspi_xfer *xfer;

	xfer = &ctrl->xfer;
	pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
	pio_xfer_cfg &= ~TRANSFER_DIRECTION;
	pio_xfer_cfg |= xfer->dir;
	if (xfer->is_last)
		pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
	else
		pio_xfer_cfg |= TRANSFER_FRAGMENT;
	pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
	iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
	pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;

	writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}

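/* Writing the remaining byte count to PIO_XFER_CTRL starts the PIO transfer */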
static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_ctrl;

	pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
	pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
	pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
	writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}

static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
	u32 ints;

	qcom_qspi_pio_xfer_cfg(ctrl);

	/* Ack any previous interrupts that might be hanging around */
	writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);

	/* Setup new interrupts */
	if (ctrl->xfer.dir == QSPI_WRITE)
		ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
	else
		ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
	writel(ints, ctrl->base + MSTR_INT_EN);

	/* Kick off the transfer */
	qcom_qspi_pio_xfer_ctrl(ctrl);
}

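/*
 * Error path called by the SPI core: mask and ack all interrupts, drop
 * the remaining byte count and free any DMA command descriptors that
 * are still outstanding.
 */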
static void qcom_qspi_handle_err(struct spi_controller *host,
				 struct spi_message *msg)
{
	u32 int_status;
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctrl->lock, flags);
	writel(0, ctrl->base + MSTR_INT_EN);
	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);
	ctrl->xfer.rem_bytes = 0;

	/* free cmd descriptors if they are around (DMA mode) */
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
}

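/*
 * Update the core clock (via OPP, at 4x the transfer clock) and the
 * interconnect bandwidth vote whenever the requested SPI speed changes.
 */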
static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
	int ret;
	unsigned int avg_bw_cpu;

	if (speed_hz == ctrl->last_speed)
		return 0;

	/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
	ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
	if (ret) {
		dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
		return ret;
	}

	/*
	 * Set BW quota for CPU.
	 * We don't have explicit peak requirement so keep it equal to avg_bw.
	 */
	avg_bw_cpu = Bps_to_icc(speed_hz);
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ctrl->last_speed = speed_hz;

	return 0;
}

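/*
 * Allocate one command descriptor from the DMA pool, fill it in for the
 * current transfer and chain it onto the previous descriptor (if any).
 * Called with ctrl->lock held, hence the atomic allocation.
 */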
static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
				uint32_t n_bytes)
{
	struct qspi_cmd_desc *virt_cmd_desc, *prev;
	dma_addr_t dma_cmd_desc;

	/* allocate for dma cmd descriptor */
	virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool,
				       GFP_ATOMIC | __GFP_ZERO, &dma_cmd_desc);
	if (!virt_cmd_desc) {
		dev_warn_once(ctrl->dev, "Couldn't find memory for descriptor\n");
		return -EAGAIN;
	}

	ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
	ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
	ctrl->n_cmd_desc++;

	/* setup cmd descriptor */
	virt_cmd_desc->data_address = dma_ptr;
	virt_cmd_desc->direction = ctrl->xfer.dir;
	virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
	virt_cmd_desc->fragment = !ctrl->xfer.is_last;
	virt_cmd_desc->length = n_bytes;

	/* update previous descriptor */
	if (ctrl->n_cmd_desc >= 2) {
		prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
		prev->next_descriptor = dma_cmd_desc;
		prev->fragment = 1;
	}

	return 0;
}

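/*
 * Validate the scatterlist the SPI core mapped for this transfer and
 * build a descriptor chain for it. Returns -EAGAIN (so the caller can
 * fall back to PIO) if the list is too long, misaligned, or a read is
 * not a multiple of 4 bytes.
 */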
static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
				    struct spi_transfer *xfer)
{
	int ret;
	struct sg_table *sgt;
	dma_addr_t dma_ptr_sg;
	unsigned int dma_len_sg;
	int i;

	if (ctrl->n_cmd_desc) {
		dev_err(ctrl->dev, "Remnant dma buffers n_cmd_desc-%d\n", ctrl->n_cmd_desc);
		return -EIO;
	}

	sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
	if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
		dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n", sgt->nents);
		return -EAGAIN;
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);
		if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
			dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
			return -EAGAIN;
		}
		/*
		 * When reading with DMA the controller writes to memory 1 word
		 * at a time. If the length isn't a multiple of 4 bytes then
		 * the controller can clobber things later in memory.
		 * Fall back to PIO to be safe.
		 */
		if (ctrl->xfer.dir == QSPI_READ && (dma_len_sg & 0x03)) {
			dev_warn_once(ctrl->dev, "fallback to PIO for read of size %#010x\n",
				      dma_len_sg);
			return -EAGAIN;
		}
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);

		ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	return ret;
}

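/* Arm the DMA-done interrupt and point the controller at the first descriptor */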
static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
{
	/* Setup new interrupts */
	writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);

	/* kick off transfer */
	writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
}

/* Switch to DMA if transfer length exceeds this */
#define QSPI_MAX_BYTES_FIFO 64

static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
			      struct spi_device *slv, struct spi_transfer *xfer)
{
	return xfer->len > QSPI_MAX_BYTES_FIFO;
}

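/*
 * Start a single transfer: pick the clock rate, decide between DMA (if
 * the SPI core mapped a scatterlist) and PIO, and kick off the hardware.
 * Returns 1 so the SPI core waits for spi_finalize_current_transfer().
 */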
static int qcom_qspi_transfer_one(struct spi_controller *host,
				  struct spi_device *slv,
				  struct spi_transfer *xfer)
{
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;
	unsigned long speed_hz;
	unsigned long flags;
	u32 mstr_cfg;

	speed_hz = slv->max_speed_hz;
	if (xfer->speed_hz)
		speed_hz = xfer->speed_hz;

	ret = qcom_qspi_set_speed(ctrl, speed_hz);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);

	/* We are half duplex, so either rx or tx will be set */
	if (xfer->rx_buf) {
		ctrl->xfer.dir = QSPI_READ;
		ctrl->xfer.buswidth = xfer->rx_nbits;
		ctrl->xfer.rx_buf = xfer->rx_buf;
	} else {
		ctrl->xfer.dir = QSPI_WRITE;
		ctrl->xfer.buswidth = xfer->tx_nbits;
		ctrl->xfer.tx_buf = xfer->tx_buf;
	}
	ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
					  &host->cur_msg->transfers);
	ctrl->xfer.rem_bytes = xfer->len;

	if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
		/* do DMA transfer */
		if (!(mstr_cfg & DMA_ENABLE)) {
			mstr_cfg |= DMA_ENABLE;
			writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
		}

		ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
		if (ret != -EAGAIN) {
			if (!ret) {
				dma_wmb();
				qcom_qspi_dma_xfer(ctrl);
			}
			goto exit;
		}
		dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
		ret = 0; /* We'll retry w/ PIO */
	}

	if (mstr_cfg & DMA_ENABLE) {
		mstr_cfg &= ~DMA_ENABLE;
		writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	}
	qcom_qspi_pio_xfer(ctrl);

exit:
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (ret)
		return ret;

	/* We'll call spi_finalize_current_transfer() when done */
	return 1;
}

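/*
 * Program MSTR_CONFIG once per message: chip select, SPI mode, feedback
 * clock and the fixed TX delays. DMA stays disabled here; it is enabled
 * per transfer when a scatterlist is available.
 */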
static int qcom_qspi_prepare_message(struct spi_controller *host,
				     struct spi_message *message)
{
	u32 mstr_cfg;
	struct qcom_qspi *ctrl;
	int tx_data_oe_delay = 1;
	int tx_data_delay = 1;
	unsigned long flags;

	ctrl = spi_controller_get_devdata(host);
	spin_lock_irqsave(&ctrl->lock, flags);

	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
	mstr_cfg &= ~CHIP_SELECT_NUM;
	if (spi_get_chipselect(message->spi, 0))
		mstr_cfg |= CHIP_SELECT_NUM;

	mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
	mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
	mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
	mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
	mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
	mstr_cfg &= ~DMA_ENABLE;

	writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return 0;
}

static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
{
	ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
		ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
	if (!ctrl->dma_cmd_pool)
		return -ENOMEM;

	return 0;
}

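/*
 * Drain the read FIFO into the rx buffer: whole 32-bit words first, then
 * the trailing 1-3 bytes of the final partially-filled word.
 */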
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
	u32 rd_fifo_status;
	u32 rd_fifo;
	unsigned int wr_cnts;
	unsigned int bytes_to_read;
	unsigned int words_to_read;
	u32 *word_buf;
	u8 *byte_buf;
	int i;

	rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);

	if (!(rd_fifo_status & FIFO_RDY)) {
		dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
		return IRQ_NONE;
	}

	wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
	wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);

	words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
	bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;

	if (words_to_read) {
		word_buf = ctrl->xfer.rx_buf;
		ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
		ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
		ctrl->xfer.rx_buf = word_buf + words_to_read;
	}

	if (bytes_to_read) {
		byte_buf = ctrl->xfer.rx_buf;
		rd_fifo = readl(ctrl->base + RD_FIFO);
		ctrl->xfer.rem_bytes -= bytes_to_read;
		for (i = 0; i < bytes_to_read; i++)
			*byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
		ctrl->xfer.rx_buf = byte_buf;
	}

	return IRQ_HANDLED;
}

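/*
 * Refill the write FIFO from the tx buffer. Whole words go out through
 * PIO_DATAOUT_4B; the final 1-3 odd bytes are written one at a time
 * through PIO_DATAOUT_1B.
 */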
static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
	const void *xfer_buf = ctrl->xfer.tx_buf;
	const int *word_buf;
	const char *byte_buf;
	unsigned int wr_fifo_bytes;
	unsigned int wr_fifo_words;
	unsigned int wr_size;
	unsigned int rem_words;

	wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
	wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;

	if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
		/* Process the last 1-3 bytes */
		wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
		ctrl->xfer.rem_bytes -= wr_size;

		byte_buf = xfer_buf;
		while (wr_size--)
			writel(*byte_buf++,
			       ctrl->base + PIO_DATAOUT_1B);
		ctrl->xfer.tx_buf = byte_buf;
	} else {
		/*
		 * Process all the whole words; to keep things simple we'll
		 * just wait for the next interrupt to handle the last 1-3
		 * bytes if we don't have an even number of words.
		 */
		rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
		wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;

		wr_size = min(rem_words, wr_fifo_words);
		ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;

		word_buf = xfer_buf;
		iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
		ctrl->xfer.tx_buf = word_buf + wr_size;
	}

	return IRQ_HANDLED;
}

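/*
 * Single interrupt handler for both PIO and DMA completion. Status bits
 * are acked up front; a transfer is finalized either when rem_bytes hits
 * zero (PIO) or when DMA_CHAIN_DONE fires (DMA).
 */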
static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
	u32 int_status;
	struct qcom_qspi *ctrl = dev_id;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&ctrl->lock);

	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);

	/* Ignore disabled interrupts */
	int_status &= readl(ctrl->base + MSTR_INT_EN);

	/* PIO mode handling */
	if (ctrl->xfer.dir == QSPI_WRITE) {
		if (int_status & WR_FIFO_EMPTY)
			ret = pio_write(ctrl);
	} else {
		if (int_status & RESP_FIFO_RDY)
			ret = pio_read(ctrl);
	}

	if (int_status & QSPI_ERR_IRQS) {
		if (int_status & RESP_FIFO_UNDERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
		if (int_status & WR_FIFO_OVERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
		if (int_status & HRESP_FROM_NOC_ERR)
			dev_err(ctrl->dev, "IRQ error: NOC response error\n");
		ret = IRQ_HANDLED;
	}

	if (!ctrl->xfer.rem_bytes) {
		writel(0, ctrl->base + MSTR_INT_EN);
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	/* DMA mode handling */
	if (int_status & DMA_CHAIN_DONE) {
		int i;

		writel(0, ctrl->base + MSTR_INT_EN);
		ctrl->xfer.rem_bytes = 0;

		for (i = 0; i < ctrl->n_cmd_desc; i++)
			dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
				      ctrl->dma_cmd_desc[i]);
		ctrl->n_cmd_desc = 0;

		ret = IRQ_HANDLED;
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	spin_unlock(&ctrl->lock);
	return ret;
}

668 | |
669 | static int qcom_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) |
670 | { |
671 | /* |
672 | * If qcom_qspi_can_dma() is going to return false we don't need to |
673 | * adjust anything. |
674 | */ |
675 | if (op->data.nbytes <= QSPI_MAX_BYTES_FIFO) |
676 | return 0; |
677 | |
678 | /* |
679 | * When reading, the transfer needs to be a multiple of 4 bytes so |
680 | * shrink the transfer if that's not true. The caller will then do a |
681 | * second transfer to finish things up. |
682 | */ |
683 | if (op->data.dir == SPI_MEM_DATA_IN && (op->data.nbytes & 0x3)) |
684 | op->data.nbytes &= ~0x3; |
685 | |
686 | return 0; |
687 | } |
688 | |
689 | static const struct spi_controller_mem_ops qcom_qspi_mem_ops = { |
690 | .adjust_op_size = qcom_qspi_adjust_op_size, |
691 | }; |
692 | |
static int qcom_qspi_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev;
	struct spi_controller *host;
	struct qcom_qspi *ctrl;

	dev = &pdev->dev;

	host = devm_spi_alloc_host(dev, sizeof(*ctrl));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);

	ctrl = spi_controller_get_devdata(host);

	spin_lock_init(&ctrl->lock);
	ctrl->dev = dev;
	ctrl->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
				  sizeof(*ctrl->clks), GFP_KERNEL);
	if (!ctrl->clks)
		return -ENOMEM;

	ctrl->clks[QSPI_CLK_CORE].id = "core";
	ctrl->clks[QSPI_CLK_IFACE].id = "iface";
	ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
	if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
		return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
				     "Failed to get cpu path\n");

	/* Set BW vote for register access */
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
			 Bps_to_icc(1000));
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
	if (ret) {
		dev_err(dev, "Failed to request irq %d\n", ret);
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "could not set DMA mask\n");

	host->max_speed_hz = 300000000;
	host->max_dma_len = 65536; /* as per HPG */
	host->dma_alignment = QSPI_ALIGN_REQ;
	host->num_chipselect = QSPI_NUM_CS;
	host->bus_num = -1;
	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_MODE_0 |
			  SPI_TX_DUAL | SPI_RX_DUAL |
			  SPI_TX_QUAD | SPI_RX_QUAD;
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->prepare_message = qcom_qspi_prepare_message;
	host->transfer_one = qcom_qspi_transfer_one;
	host->handle_err = qcom_qspi_handle_err;
	if (of_property_read_bool(pdev->dev.of_node, "iommus"))
		host->can_dma = qcom_qspi_can_dma;
	host->auto_runtime_pm = true;
	host->mem_ops = &qcom_qspi_mem_ops;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = qcom_qspi_alloc_dma(ctrl);
	if (ret)
		return ret;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 250);
	pm_runtime_enable(dev);

	ret = spi_register_controller(host);
	if (!ret)
		return 0;

	pm_runtime_disable(dev);

	return ret;
}

static void qcom_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_controller(host);

	pm_runtime_disable(&pdev->dev);
}

static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}

static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_controller_resume(host);

	return ret;
}

static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_controller_resume(host);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
			   qcom_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};

static const struct of_device_id qcom_qspi_dt_match[] = {
	{ .compatible = "qcom,qspi-v1", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);

static struct platform_driver qcom_qspi_driver = {
	.driver = {
		.name = "qcom_qspi",
		.pm = &qcom_qspi_dev_pm_ops,
		.of_match_table = qcom_qspi_dt_match,
	},
	.probe = qcom_qspi_probe,
	.remove_new = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);

MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");