1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | // |
3 | // Copyright 2013 Freescale Semiconductor, Inc. |
4 | // Copyright 2020 NXP |
5 | // |
6 | // Freescale DSPI driver |
7 | // This file contains a driver for the Freescale DSPI |
8 | |
9 | #include <linux/clk.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/dmaengine.h> |
12 | #include <linux/dma-mapping.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> |
16 | #include <linux/of.h> |
17 | #include <linux/platform_device.h> |
18 | #include <linux/pinctrl/consumer.h> |
19 | #include <linux/regmap.h> |
20 | #include <linux/spi/spi.h> |
21 | #include <linux/spi/spi-fsl-dspi.h> |
22 | |
23 | #define DRIVER_NAME "fsl-dspi" |
24 | |
25 | #define SPI_MCR 0x00 |
26 | #define SPI_MCR_HOST BIT(31) |
27 | #define SPI_MCR_PCSIS(x) ((x) << 16) |
28 | #define SPI_MCR_CLR_TXF BIT(11) |
29 | #define SPI_MCR_CLR_RXF BIT(10) |
30 | #define SPI_MCR_XSPI BIT(3) |
31 | #define SPI_MCR_DIS_TXF BIT(13) |
32 | #define SPI_MCR_DIS_RXF BIT(12) |
33 | #define SPI_MCR_HALT BIT(0) |
34 | |
35 | #define SPI_TCR 0x08 |
36 | #define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16) |
37 | |
38 | #define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4)) |
39 | #define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27)) |
40 | #define SPI_CTAR_CPOL BIT(26) |
41 | #define SPI_CTAR_CPHA BIT(25) |
42 | #define SPI_CTAR_LSBFE BIT(24) |
43 | #define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22)) |
44 | #define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20)) |
45 | #define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18)) |
46 | #define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16)) |
47 | #define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12)) |
48 | #define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8)) |
49 | #define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4)) |
50 | #define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0)) |
51 | #define SPI_CTAR_SCALE_BITS 0xf |
52 | |
53 | #define SPI_CTAR0_SLAVE 0x0c |
54 | |
55 | #define SPI_SR 0x2c |
56 | #define SPI_SR_TCFQF BIT(31) |
57 | #define SPI_SR_TFUF BIT(27) |
58 | #define SPI_SR_TFFF BIT(25) |
59 | #define SPI_SR_CMDTCF BIT(23) |
60 | #define SPI_SR_SPEF BIT(21) |
61 | #define SPI_SR_RFOF BIT(19) |
62 | #define SPI_SR_TFIWF BIT(18) |
63 | #define SPI_SR_RFDF BIT(17) |
64 | #define SPI_SR_CMDFFF BIT(16) |
65 | #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ |
66 | SPI_SR_TFUF | SPI_SR_TFFF | \ |
67 | SPI_SR_CMDTCF | SPI_SR_SPEF | \ |
68 | SPI_SR_RFOF | SPI_SR_TFIWF | \ |
69 | SPI_SR_RFDF | SPI_SR_CMDFFF) |
70 | |
71 | #define SPI_RSER_TFFFE BIT(25) |
72 | #define SPI_RSER_TFFFD BIT(24) |
73 | #define SPI_RSER_RFDFE BIT(17) |
74 | #define SPI_RSER_RFDFD BIT(16) |
75 | |
76 | #define SPI_RSER 0x30 |
77 | #define SPI_RSER_TCFQE BIT(31) |
78 | #define SPI_RSER_CMDTCFE BIT(23) |
79 | |
80 | #define SPI_PUSHR 0x34 |
81 | #define SPI_PUSHR_CMD_CONT BIT(15) |
82 | #define SPI_PUSHR_CMD_CTAS(x) (((x) << 12 & GENMASK(14, 12))) |
83 | #define SPI_PUSHR_CMD_EOQ BIT(11) |
84 | #define SPI_PUSHR_CMD_CTCNT BIT(10) |
85 | #define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0)) |
86 | |
87 | #define SPI_PUSHR_SLAVE 0x34 |
88 | |
89 | #define SPI_POPR 0x38 |
90 | |
91 | #define SPI_TXFR0 0x3c |
92 | #define SPI_TXFR1 0x40 |
93 | #define SPI_TXFR2 0x44 |
94 | #define SPI_TXFR3 0x48 |
95 | #define SPI_RXFR0 0x7c |
96 | #define SPI_RXFR1 0x80 |
97 | #define SPI_RXFR2 0x84 |
98 | #define SPI_RXFR3 0x88 |
99 | |
100 | #define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4)) |
101 | #define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16) |
102 | #define SPI_CTARE_DTCP(x) ((x) & 0x7ff) |
103 | |
104 | #define SPI_SREX 0x13c |
105 | |
106 | #define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1) |
107 | #define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4) |
108 | |
109 | #define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000) |
110 | |
111 | struct chip_data { |
112 | u32 ctar_val; |
113 | }; |
114 | |
115 | enum dspi_trans_mode { |
116 | DSPI_XSPI_MODE, |
117 | DSPI_DMA_MODE, |
118 | }; |
119 | |
120 | struct fsl_dspi_devtype_data { |
121 | enum dspi_trans_mode trans_mode; |
122 | u8 max_clock_factor; |
123 | int fifo_size; |
124 | }; |
125 | |
126 | enum { |
127 | LS1021A, |
128 | LS1012A, |
129 | LS1028A, |
130 | LS1043A, |
131 | LS1046A, |
132 | LS2080A, |
133 | LS2085A, |
134 | LX2160A, |
135 | MCF5441X, |
136 | VF610, |
137 | }; |
138 | |
139 | static const struct fsl_dspi_devtype_data devtype_data[] = { |
140 | [VF610] = { |
141 | .trans_mode = DSPI_DMA_MODE, |
142 | .max_clock_factor = 2, |
143 | .fifo_size = 4, |
144 | }, |
145 | [LS1021A] = { |
146 | /* Has A-011218 DMA erratum */ |
147 | .trans_mode = DSPI_XSPI_MODE, |
148 | .max_clock_factor = 8, |
149 | .fifo_size = 4, |
150 | }, |
151 | [LS1012A] = { |
152 | /* Has A-011218 DMA erratum */ |
153 | .trans_mode = DSPI_XSPI_MODE, |
154 | .max_clock_factor = 8, |
155 | .fifo_size = 16, |
156 | }, |
157 | [LS1028A] = { |
158 | .trans_mode = DSPI_XSPI_MODE, |
159 | .max_clock_factor = 8, |
160 | .fifo_size = 4, |
161 | }, |
162 | [LS1043A] = { |
163 | /* Has A-011218 DMA erratum */ |
164 | .trans_mode = DSPI_XSPI_MODE, |
165 | .max_clock_factor = 8, |
166 | .fifo_size = 16, |
167 | }, |
168 | [LS1046A] = { |
169 | /* Has A-011218 DMA erratum */ |
170 | .trans_mode = DSPI_XSPI_MODE, |
171 | .max_clock_factor = 8, |
172 | .fifo_size = 16, |
173 | }, |
174 | [LS2080A] = { |
175 | .trans_mode = DSPI_XSPI_MODE, |
176 | .max_clock_factor = 8, |
177 | .fifo_size = 4, |
178 | }, |
179 | [LS2085A] = { |
180 | .trans_mode = DSPI_XSPI_MODE, |
181 | .max_clock_factor = 8, |
182 | .fifo_size = 4, |
183 | }, |
184 | [LX2160A] = { |
185 | .trans_mode = DSPI_XSPI_MODE, |
186 | .max_clock_factor = 8, |
187 | .fifo_size = 4, |
188 | }, |
189 | [MCF5441X] = { |
190 | .trans_mode = DSPI_DMA_MODE, |
191 | .max_clock_factor = 8, |
192 | .fifo_size = 16, |
193 | }, |
194 | }; |
195 | |
196 | struct fsl_dspi_dma { |
197 | u32 *tx_dma_buf; |
198 | struct dma_chan *chan_tx; |
199 | dma_addr_t tx_dma_phys; |
200 | struct completion cmd_tx_complete; |
201 | struct dma_async_tx_descriptor *tx_desc; |
202 | |
203 | u32 *rx_dma_buf; |
204 | struct dma_chan *chan_rx; |
205 | dma_addr_t rx_dma_phys; |
206 | struct completion cmd_rx_complete; |
207 | struct dma_async_tx_descriptor *rx_desc; |
208 | }; |
209 | |
210 | struct fsl_dspi { |
211 | struct spi_controller *ctlr; |
212 | struct platform_device *pdev; |
213 | |
214 | struct regmap *regmap; |
215 | struct regmap *regmap_pushr; |
216 | int irq; |
217 | struct clk *clk; |
218 | |
219 | struct spi_transfer *cur_transfer; |
220 | struct spi_message *cur_msg; |
221 | struct chip_data *cur_chip; |
222 | size_t progress; |
223 | size_t len; |
224 | const void *tx; |
225 | void *rx; |
226 | u16 tx_cmd; |
227 | const struct fsl_dspi_devtype_data *devtype_data; |
228 | |
229 | struct completion xfer_done; |
230 | |
231 | struct fsl_dspi_dma *dma; |
232 | |
233 | int oper_word_size; |
234 | int oper_bits_per_word; |
235 | |
236 | int words_in_flight; |
237 | |
238 | /* |
239 | * Offsets for CMD and TXDATA within SPI_PUSHR when accessed |
240 | * individually (in XSPI mode) |
241 | */ |
242 | int pushr_cmd; |
243 | int pushr_tx; |
244 | |
245 | void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata); |
246 | void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata); |
247 | }; |
248 | |
249 | static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) |
250 | { |
251 | switch (dspi->oper_word_size) { |
252 | case 1: |
253 | *txdata = *(u8 *)dspi->tx; |
254 | break; |
255 | case 2: |
256 | *txdata = *(u16 *)dspi->tx; |
257 | break; |
258 | case 4: |
259 | *txdata = *(u32 *)dspi->tx; |
260 | break; |
261 | } |
262 | dspi->tx += dspi->oper_word_size; |
263 | } |
264 | |
265 | static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) |
266 | { |
267 | switch (dspi->oper_word_size) { |
268 | case 1: |
269 | *(u8 *)dspi->rx = rxdata; |
270 | break; |
271 | case 2: |
272 | *(u16 *)dspi->rx = rxdata; |
273 | break; |
274 | case 4: |
275 | *(u32 *)dspi->rx = rxdata; |
276 | break; |
277 | } |
278 | dspi->rx += dspi->oper_word_size; |
279 | } |
280 | |
281 | static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) |
282 | { |
283 | *txdata = cpu_to_be32(*(u32 *)dspi->tx); |
284 | dspi->tx += sizeof(u32); |
285 | } |
286 | |
287 | static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) |
288 | { |
289 | *(u32 *)dspi->rx = be32_to_cpu(rxdata); |
290 | dspi->rx += sizeof(u32); |
291 | } |
292 | |
293 | static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) |
294 | { |
295 | *txdata = cpu_to_be16(*(u16 *)dspi->tx); |
296 | dspi->tx += sizeof(u16); |
297 | } |
298 | |
299 | static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) |
300 | { |
301 | *(u16 *)dspi->rx = be16_to_cpu(rxdata); |
302 | dspi->rx += sizeof(u16); |
303 | } |
304 | |
305 | static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) |
306 | { |
307 | u16 hi = *(u16 *)dspi->tx; |
308 | u16 lo = *(u16 *)(dspi->tx + 2); |
309 | |
310 | *txdata = (u32)hi << 16 | lo; |
311 | dspi->tx += sizeof(u32); |
312 | } |
313 | |
314 | static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) |
315 | { |
316 | u16 hi = rxdata & 0xffff; |
317 | u16 lo = rxdata >> 16; |
318 | |
319 | *(u16 *)dspi->rx = lo; |
320 | *(u16 *)(dspi->rx + 2) = hi; |
321 | dspi->rx += sizeof(u32); |
322 | } |
323 | |
324 | /* |
325 | * Pop one word from the TX buffer for pushing into the |
326 | * PUSHR register (TX FIFO) |
327 | */ |
328 | static u32 dspi_pop_tx(struct fsl_dspi *dspi) |
329 | { |
330 | u32 txdata = 0; |
331 | |
332 | if (dspi->tx) |
333 | dspi->host_to_dev(dspi, &txdata); |
334 | dspi->len -= dspi->oper_word_size; |
335 | return txdata; |
336 | } |
337 | |
338 | /* Prepare one TX FIFO entry (txdata plus cmd) */ |
339 | static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) |
340 | { |
341 | u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); |
342 | |
343 | if (spi_controller_is_target(ctlr: dspi->ctlr)) |
344 | return data; |
345 | |
346 | if (dspi->len > 0) |
347 | cmd |= SPI_PUSHR_CMD_CONT; |
348 | return cmd << 16 | data; |
349 | } |
350 | |
351 | /* Push one word to the RX buffer from the POPR register (RX FIFO) */ |
352 | static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata) |
353 | { |
354 | if (!dspi->rx) |
355 | return; |
356 | dspi->dev_to_host(dspi, rxdata); |
357 | } |
358 | |
359 | static void dspi_tx_dma_callback(void *arg) |
360 | { |
361 | struct fsl_dspi *dspi = arg; |
362 | struct fsl_dspi_dma *dma = dspi->dma; |
363 | |
364 | complete(&dma->cmd_tx_complete); |
365 | } |
366 | |
367 | static void dspi_rx_dma_callback(void *arg) |
368 | { |
369 | struct fsl_dspi *dspi = arg; |
370 | struct fsl_dspi_dma *dma = dspi->dma; |
371 | int i; |
372 | |
373 | if (dspi->rx) { |
374 | for (i = 0; i < dspi->words_in_flight; i++) |
375 | dspi_push_rx(dspi, rxdata: dspi->dma->rx_dma_buf[i]); |
376 | } |
377 | |
378 | complete(&dma->cmd_rx_complete); |
379 | } |
380 | |
381 | static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) |
382 | { |
383 | struct device *dev = &dspi->pdev->dev; |
384 | struct fsl_dspi_dma *dma = dspi->dma; |
385 | int time_left; |
386 | int i; |
387 | |
388 | for (i = 0; i < dspi->words_in_flight; i++) |
389 | dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi); |
390 | |
391 | dma->tx_desc = dmaengine_prep_slave_single(chan: dma->chan_tx, |
392 | buf: dma->tx_dma_phys, |
393 | len: dspi->words_in_flight * |
394 | DMA_SLAVE_BUSWIDTH_4_BYTES, |
395 | dir: DMA_MEM_TO_DEV, |
396 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
397 | if (!dma->tx_desc) { |
398 | dev_err(dev, "Not able to get desc for DMA xfer\n" ); |
399 | return -EIO; |
400 | } |
401 | |
402 | dma->tx_desc->callback = dspi_tx_dma_callback; |
403 | dma->tx_desc->callback_param = dspi; |
404 | if (dma_submit_error(cookie: dmaengine_submit(desc: dma->tx_desc))) { |
405 | dev_err(dev, "DMA submit failed\n" ); |
406 | return -EINVAL; |
407 | } |
408 | |
409 | dma->rx_desc = dmaengine_prep_slave_single(chan: dma->chan_rx, |
410 | buf: dma->rx_dma_phys, |
411 | len: dspi->words_in_flight * |
412 | DMA_SLAVE_BUSWIDTH_4_BYTES, |
413 | dir: DMA_DEV_TO_MEM, |
414 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
415 | if (!dma->rx_desc) { |
416 | dev_err(dev, "Not able to get desc for DMA xfer\n" ); |
417 | return -EIO; |
418 | } |
419 | |
420 | dma->rx_desc->callback = dspi_rx_dma_callback; |
421 | dma->rx_desc->callback_param = dspi; |
422 | if (dma_submit_error(cookie: dmaengine_submit(desc: dma->rx_desc))) { |
423 | dev_err(dev, "DMA submit failed\n" ); |
424 | return -EINVAL; |
425 | } |
426 | |
427 | reinit_completion(x: &dspi->dma->cmd_rx_complete); |
428 | reinit_completion(x: &dspi->dma->cmd_tx_complete); |
429 | |
430 | dma_async_issue_pending(chan: dma->chan_rx); |
431 | dma_async_issue_pending(chan: dma->chan_tx); |
432 | |
433 | if (spi_controller_is_target(ctlr: dspi->ctlr)) { |
434 | wait_for_completion_interruptible(x: &dspi->dma->cmd_rx_complete); |
435 | return 0; |
436 | } |
437 | |
438 | time_left = wait_for_completion_timeout(x: &dspi->dma->cmd_tx_complete, |
439 | DMA_COMPLETION_TIMEOUT); |
440 | if (time_left == 0) { |
441 | dev_err(dev, "DMA tx timeout\n" ); |
442 | dmaengine_terminate_all(chan: dma->chan_tx); |
443 | dmaengine_terminate_all(chan: dma->chan_rx); |
444 | return -ETIMEDOUT; |
445 | } |
446 | |
447 | time_left = wait_for_completion_timeout(x: &dspi->dma->cmd_rx_complete, |
448 | DMA_COMPLETION_TIMEOUT); |
449 | if (time_left == 0) { |
450 | dev_err(dev, "DMA rx timeout\n" ); |
451 | dmaengine_terminate_all(chan: dma->chan_tx); |
452 | dmaengine_terminate_all(chan: dma->chan_rx); |
453 | return -ETIMEDOUT; |
454 | } |
455 | |
456 | return 0; |
457 | } |
458 | |
459 | static void dspi_setup_accel(struct fsl_dspi *dspi); |
460 | |
461 | static int dspi_dma_xfer(struct fsl_dspi *dspi) |
462 | { |
463 | struct spi_message *message = dspi->cur_msg; |
464 | struct device *dev = &dspi->pdev->dev; |
465 | int ret = 0; |
466 | |
467 | /* |
468 | * dspi->len gets decremented by dspi_pop_tx_pushr in |
469 | * dspi_next_xfer_dma_submit |
470 | */ |
471 | while (dspi->len) { |
472 | /* Figure out operational bits-per-word for this chunk */ |
473 | dspi_setup_accel(dspi); |
474 | |
475 | dspi->words_in_flight = dspi->len / dspi->oper_word_size; |
476 | if (dspi->words_in_flight > dspi->devtype_data->fifo_size) |
477 | dspi->words_in_flight = dspi->devtype_data->fifo_size; |
478 | |
479 | message->actual_length += dspi->words_in_flight * |
480 | dspi->oper_word_size; |
481 | |
482 | ret = dspi_next_xfer_dma_submit(dspi); |
483 | if (ret) { |
484 | dev_err(dev, "DMA transfer failed\n" ); |
485 | break; |
486 | } |
487 | } |
488 | |
489 | return ret; |
490 | } |
491 | |
492 | static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) |
493 | { |
494 | int dma_bufsize = dspi->devtype_data->fifo_size * 2; |
495 | struct device *dev = &dspi->pdev->dev; |
496 | struct dma_slave_config cfg; |
497 | struct fsl_dspi_dma *dma; |
498 | int ret; |
499 | |
500 | dma = devm_kzalloc(dev, size: sizeof(*dma), GFP_KERNEL); |
501 | if (!dma) |
502 | return -ENOMEM; |
503 | |
504 | dma->chan_rx = dma_request_chan(dev, name: "rx" ); |
505 | if (IS_ERR(ptr: dma->chan_rx)) |
506 | return dev_err_probe(dev, err: PTR_ERR(ptr: dma->chan_rx), fmt: "rx dma channel not available\n" ); |
507 | |
508 | dma->chan_tx = dma_request_chan(dev, name: "tx" ); |
509 | if (IS_ERR(ptr: dma->chan_tx)) { |
510 | ret = dev_err_probe(dev, err: PTR_ERR(ptr: dma->chan_tx), fmt: "tx dma channel not available\n" ); |
511 | goto err_tx_channel; |
512 | } |
513 | |
514 | dma->tx_dma_buf = dma_alloc_coherent(dev: dma->chan_tx->device->dev, |
515 | size: dma_bufsize, dma_handle: &dma->tx_dma_phys, |
516 | GFP_KERNEL); |
517 | if (!dma->tx_dma_buf) { |
518 | ret = -ENOMEM; |
519 | goto err_tx_dma_buf; |
520 | } |
521 | |
522 | dma->rx_dma_buf = dma_alloc_coherent(dev: dma->chan_rx->device->dev, |
523 | size: dma_bufsize, dma_handle: &dma->rx_dma_phys, |
524 | GFP_KERNEL); |
525 | if (!dma->rx_dma_buf) { |
526 | ret = -ENOMEM; |
527 | goto err_rx_dma_buf; |
528 | } |
529 | |
530 | memset(&cfg, 0, sizeof(cfg)); |
531 | cfg.src_addr = phy_addr + SPI_POPR; |
532 | cfg.dst_addr = phy_addr + SPI_PUSHR; |
533 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
534 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
535 | cfg.src_maxburst = 1; |
536 | cfg.dst_maxburst = 1; |
537 | |
538 | cfg.direction = DMA_DEV_TO_MEM; |
539 | ret = dmaengine_slave_config(chan: dma->chan_rx, config: &cfg); |
540 | if (ret) { |
541 | dev_err_probe(dev, err: ret, fmt: "can't configure rx dma channel\n" ); |
542 | goto err_slave_config; |
543 | } |
544 | |
545 | cfg.direction = DMA_MEM_TO_DEV; |
546 | ret = dmaengine_slave_config(chan: dma->chan_tx, config: &cfg); |
547 | if (ret) { |
548 | dev_err_probe(dev, err: ret, fmt: "can't configure tx dma channel\n" ); |
549 | goto err_slave_config; |
550 | } |
551 | |
552 | dspi->dma = dma; |
553 | init_completion(x: &dma->cmd_tx_complete); |
554 | init_completion(x: &dma->cmd_rx_complete); |
555 | |
556 | return 0; |
557 | |
558 | err_slave_config: |
559 | dma_free_coherent(dev: dma->chan_rx->device->dev, |
560 | size: dma_bufsize, cpu_addr: dma->rx_dma_buf, dma_handle: dma->rx_dma_phys); |
561 | err_rx_dma_buf: |
562 | dma_free_coherent(dev: dma->chan_tx->device->dev, |
563 | size: dma_bufsize, cpu_addr: dma->tx_dma_buf, dma_handle: dma->tx_dma_phys); |
564 | err_tx_dma_buf: |
565 | dma_release_channel(chan: dma->chan_tx); |
566 | err_tx_channel: |
567 | dma_release_channel(chan: dma->chan_rx); |
568 | |
569 | devm_kfree(dev, p: dma); |
570 | dspi->dma = NULL; |
571 | |
572 | return ret; |
573 | } |
574 | |
575 | static void dspi_release_dma(struct fsl_dspi *dspi) |
576 | { |
577 | int dma_bufsize = dspi->devtype_data->fifo_size * 2; |
578 | struct fsl_dspi_dma *dma = dspi->dma; |
579 | |
580 | if (!dma) |
581 | return; |
582 | |
583 | if (dma->chan_tx) { |
584 | dma_free_coherent(dev: dma->chan_tx->device->dev, size: dma_bufsize, |
585 | cpu_addr: dma->tx_dma_buf, dma_handle: dma->tx_dma_phys); |
586 | dma_release_channel(chan: dma->chan_tx); |
587 | } |
588 | |
589 | if (dma->chan_rx) { |
590 | dma_free_coherent(dev: dma->chan_rx->device->dev, size: dma_bufsize, |
591 | cpu_addr: dma->rx_dma_buf, dma_handle: dma->rx_dma_phys); |
592 | dma_release_channel(chan: dma->chan_rx); |
593 | } |
594 | } |
595 | |
596 | static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, |
597 | unsigned long clkrate) |
598 | { |
599 | /* Valid baud rate pre-scaler values */ |
600 | int pbr_tbl[4] = {2, 3, 5, 7}; |
601 | int brs[16] = { 2, 4, 6, 8, |
602 | 16, 32, 64, 128, |
603 | 256, 512, 1024, 2048, |
604 | 4096, 8192, 16384, 32768 }; |
605 | int scale_needed, scale, minscale = INT_MAX; |
606 | int i, j; |
607 | |
608 | scale_needed = clkrate / speed_hz; |
609 | if (clkrate % speed_hz) |
610 | scale_needed++; |
611 | |
612 | for (i = 0; i < ARRAY_SIZE(brs); i++) |
613 | for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) { |
614 | scale = brs[i] * pbr_tbl[j]; |
615 | if (scale >= scale_needed) { |
616 | if (scale < minscale) { |
617 | minscale = scale; |
618 | *br = i; |
619 | *pbr = j; |
620 | } |
621 | break; |
622 | } |
623 | } |
624 | |
625 | if (minscale == INT_MAX) { |
626 | pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n" , |
627 | speed_hz, clkrate); |
628 | *pbr = ARRAY_SIZE(pbr_tbl) - 1; |
629 | *br = ARRAY_SIZE(brs) - 1; |
630 | } |
631 | } |
632 | |
633 | static void ns_delay_scale(char *psc, char *sc, int delay_ns, |
634 | unsigned long clkrate) |
635 | { |
636 | int scale_needed, scale, minscale = INT_MAX; |
637 | int pscale_tbl[4] = {1, 3, 5, 7}; |
638 | u32 remainder; |
639 | int i, j; |
640 | |
641 | scale_needed = div_u64_rem(dividend: (u64)delay_ns * clkrate, NSEC_PER_SEC, |
642 | remainder: &remainder); |
643 | if (remainder) |
644 | scale_needed++; |
645 | |
646 | for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++) |
647 | for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) { |
648 | scale = pscale_tbl[i] * (2 << j); |
649 | if (scale >= scale_needed) { |
650 | if (scale < minscale) { |
651 | minscale = scale; |
652 | *psc = i; |
653 | *sc = j; |
654 | } |
655 | break; |
656 | } |
657 | } |
658 | |
659 | if (minscale == INT_MAX) { |
660 | pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value" , |
661 | delay_ns, clkrate); |
662 | *psc = ARRAY_SIZE(pscale_tbl) - 1; |
663 | *sc = SPI_CTAR_SCALE_BITS; |
664 | } |
665 | } |
666 | |
667 | static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd) |
668 | { |
669 | /* |
670 | * The only time when the PCS doesn't need continuation after this word |
671 | * is when it's last. We need to look ahead, because we actually call |
672 | * dspi_pop_tx (the function that decrements dspi->len) _after_ |
673 | * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One |
674 | * word is enough. If there's more to transmit than that, |
675 | * dspi_xspi_write will know to split the FIFO writes in 2, and |
676 | * generate a new PUSHR command with the final word that will have PCS |
677 | * deasserted (not continued) here. |
678 | */ |
679 | if (dspi->len > dspi->oper_word_size) |
680 | cmd |= SPI_PUSHR_CMD_CONT; |
681 | regmap_write(map: dspi->regmap_pushr, reg: dspi->pushr_cmd, val: cmd); |
682 | } |
683 | |
684 | static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata) |
685 | { |
686 | regmap_write(map: dspi->regmap_pushr, reg: dspi->pushr_tx, val: txdata); |
687 | } |
688 | |
689 | static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words) |
690 | { |
691 | int num_bytes = num_words * dspi->oper_word_size; |
692 | u16 tx_cmd = dspi->tx_cmd; |
693 | |
694 | /* |
695 | * If the PCS needs to de-assert (i.e. we're at the end of the buffer |
696 | * and cs_change does not want the PCS to stay on), then we need a new |
697 | * PUSHR command, since this one (for the body of the buffer) |
698 | * necessarily has the CONT bit set. |
699 | * So send one word less during this go, to force a split and a command |
700 | * with a single word next time, when CONT will be unset. |
701 | */ |
702 | if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len) |
703 | tx_cmd |= SPI_PUSHR_CMD_EOQ; |
704 | |
705 | /* Update CTARE */ |
706 | regmap_write(map: dspi->regmap, SPI_CTARE(0), |
707 | SPI_FRAME_EBITS(dspi->oper_bits_per_word) | |
708 | SPI_CTARE_DTCP(num_words)); |
709 | |
710 | /* |
711 | * Write the CMD FIFO entry first, and then the two |
712 | * corresponding TX FIFO entries (or one...). |
713 | */ |
714 | dspi_pushr_cmd_write(dspi, cmd: tx_cmd); |
715 | |
716 | /* Fill TX FIFO with as many transfers as possible */ |
717 | while (num_words--) { |
718 | u32 data = dspi_pop_tx(dspi); |
719 | |
720 | dspi_pushr_txdata_write(dspi, txdata: data & 0xFFFF); |
721 | if (dspi->oper_bits_per_word > 16) |
722 | dspi_pushr_txdata_write(dspi, txdata: data >> 16); |
723 | } |
724 | } |
725 | |
726 | static u32 dspi_popr_read(struct fsl_dspi *dspi) |
727 | { |
728 | u32 rxdata = 0; |
729 | |
730 | regmap_read(map: dspi->regmap, SPI_POPR, val: &rxdata); |
731 | return rxdata; |
732 | } |
733 | |
734 | static void dspi_fifo_read(struct fsl_dspi *dspi) |
735 | { |
736 | int num_fifo_entries = dspi->words_in_flight; |
737 | |
738 | /* Read one FIFO entry and push to rx buffer */ |
739 | while (num_fifo_entries--) |
740 | dspi_push_rx(dspi, rxdata: dspi_popr_read(dspi)); |
741 | } |
742 | |
743 | static void dspi_setup_accel(struct fsl_dspi *dspi) |
744 | { |
745 | struct spi_transfer *xfer = dspi->cur_transfer; |
746 | bool odd = !!(dspi->len & 1); |
747 | |
748 | /* No accel for frames not multiple of 8 bits at the moment */ |
749 | if (xfer->bits_per_word % 8) |
750 | goto no_accel; |
751 | |
752 | if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) { |
753 | dspi->oper_bits_per_word = 16; |
754 | } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) { |
755 | dspi->oper_bits_per_word = 8; |
756 | } else { |
757 | /* Start off with maximum supported by hardware */ |
758 | if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) |
759 | dspi->oper_bits_per_word = 32; |
760 | else |
761 | dspi->oper_bits_per_word = 16; |
762 | |
763 | /* |
764 | * And go down only if the buffer can't be sent with |
765 | * words this big |
766 | */ |
767 | do { |
768 | if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8)) |
769 | break; |
770 | |
771 | dspi->oper_bits_per_word /= 2; |
772 | } while (dspi->oper_bits_per_word > 8); |
773 | } |
774 | |
775 | if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) { |
776 | dspi->dev_to_host = dspi_8on32_dev_to_host; |
777 | dspi->host_to_dev = dspi_8on32_host_to_dev; |
778 | } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) { |
779 | dspi->dev_to_host = dspi_8on16_dev_to_host; |
780 | dspi->host_to_dev = dspi_8on16_host_to_dev; |
781 | } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) { |
782 | dspi->dev_to_host = dspi_16on32_dev_to_host; |
783 | dspi->host_to_dev = dspi_16on32_host_to_dev; |
784 | } else { |
785 | no_accel: |
786 | dspi->dev_to_host = dspi_native_dev_to_host; |
787 | dspi->host_to_dev = dspi_native_host_to_dev; |
788 | dspi->oper_bits_per_word = xfer->bits_per_word; |
789 | } |
790 | |
791 | dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8); |
792 | |
793 | /* |
794 | * Update CTAR here (code is common for XSPI and DMA modes). |
795 | * We will update CTARE in the portion specific to XSPI, when we |
796 | * also know the preload value (DTCP). |
797 | */ |
798 | regmap_write(map: dspi->regmap, SPI_CTAR(0), |
799 | val: dspi->cur_chip->ctar_val | |
800 | SPI_FRAME_BITS(dspi->oper_bits_per_word)); |
801 | } |
802 | |
803 | static void dspi_fifo_write(struct fsl_dspi *dspi) |
804 | { |
805 | int num_fifo_entries = dspi->devtype_data->fifo_size; |
806 | struct spi_transfer *xfer = dspi->cur_transfer; |
807 | struct spi_message *msg = dspi->cur_msg; |
808 | int num_words, num_bytes; |
809 | |
810 | dspi_setup_accel(dspi); |
811 | |
812 | /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */ |
813 | if (dspi->oper_word_size == 4) |
814 | num_fifo_entries /= 2; |
815 | |
816 | /* |
817 | * Integer division intentionally trims off odd (or non-multiple of 4) |
818 | * numbers of bytes at the end of the buffer, which will be sent next |
819 | * time using a smaller oper_word_size. |
820 | */ |
821 | num_words = dspi->len / dspi->oper_word_size; |
822 | if (num_words > num_fifo_entries) |
823 | num_words = num_fifo_entries; |
824 | |
825 | /* Update total number of bytes that were transferred */ |
826 | num_bytes = num_words * dspi->oper_word_size; |
827 | msg->actual_length += num_bytes; |
828 | dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8); |
829 | |
830 | /* |
831 | * Update shared variable for use in the next interrupt (both in |
832 | * dspi_fifo_read and in dspi_fifo_write). |
833 | */ |
834 | dspi->words_in_flight = num_words; |
835 | |
836 | spi_take_timestamp_pre(ctlr: dspi->ctlr, xfer, progress: dspi->progress, irqs_off: !dspi->irq); |
837 | |
838 | dspi_xspi_fifo_write(dspi, num_words); |
839 | /* |
840 | * Everything after this point is in a potential race with the next |
841 | * interrupt, so we must never use dspi->words_in_flight again since it |
842 | * might already be modified by the next dspi_fifo_write. |
843 | */ |
844 | |
845 | spi_take_timestamp_post(ctlr: dspi->ctlr, xfer: dspi->cur_transfer, |
846 | progress: dspi->progress, irqs_off: !dspi->irq); |
847 | } |
848 | |
849 | static int dspi_rxtx(struct fsl_dspi *dspi) |
850 | { |
851 | dspi_fifo_read(dspi); |
852 | |
853 | if (!dspi->len) |
854 | /* Success! */ |
855 | return 0; |
856 | |
857 | dspi_fifo_write(dspi); |
858 | |
859 | return -EINPROGRESS; |
860 | } |
861 | |
862 | static int dspi_poll(struct fsl_dspi *dspi) |
863 | { |
864 | int tries = 1000; |
865 | u32 spi_sr; |
866 | |
867 | do { |
868 | regmap_read(map: dspi->regmap, SPI_SR, val: &spi_sr); |
869 | regmap_write(map: dspi->regmap, SPI_SR, val: spi_sr); |
870 | |
871 | if (spi_sr & SPI_SR_CMDTCF) |
872 | break; |
873 | } while (--tries); |
874 | |
875 | if (!tries) |
876 | return -ETIMEDOUT; |
877 | |
878 | return dspi_rxtx(dspi); |
879 | } |
880 | |
881 | static irqreturn_t dspi_interrupt(int irq, void *dev_id) |
882 | { |
883 | struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; |
884 | u32 spi_sr; |
885 | |
886 | regmap_read(map: dspi->regmap, SPI_SR, val: &spi_sr); |
887 | regmap_write(map: dspi->regmap, SPI_SR, val: spi_sr); |
888 | |
889 | if (!(spi_sr & SPI_SR_CMDTCF)) |
890 | return IRQ_NONE; |
891 | |
892 | if (dspi_rxtx(dspi) == 0) |
893 | complete(&dspi->xfer_done); |
894 | |
895 | return IRQ_HANDLED; |
896 | } |
897 | |
898 | static void dspi_assert_cs(struct spi_device *spi, bool *cs) |
899 | { |
900 | if (!spi_get_csgpiod(spi, idx: 0) || *cs) |
901 | return; |
902 | |
903 | gpiod_set_value_cansleep(desc: spi_get_csgpiod(spi, idx: 0), value: true); |
904 | *cs = true; |
905 | } |
906 | |
907 | static void dspi_deassert_cs(struct spi_device *spi, bool *cs) |
908 | { |
909 | if (!spi_get_csgpiod(spi, idx: 0) || !*cs) |
910 | return; |
911 | |
912 | gpiod_set_value_cansleep(desc: spi_get_csgpiod(spi, idx: 0), value: false); |
913 | *cs = false; |
914 | } |
915 | |
916 | static int dspi_transfer_one_message(struct spi_controller *ctlr, |
917 | struct spi_message *message) |
918 | { |
919 | struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); |
920 | struct spi_device *spi = message->spi; |
921 | struct spi_transfer *transfer; |
922 | bool cs = false; |
923 | int status = 0; |
924 | |
925 | message->actual_length = 0; |
926 | |
927 | list_for_each_entry(transfer, &message->transfers, transfer_list) { |
928 | dspi->cur_transfer = transfer; |
929 | dspi->cur_msg = message; |
930 | dspi->cur_chip = spi_get_ctldata(spi); |
931 | |
932 | dspi_assert_cs(spi, cs: &cs); |
933 | |
934 | /* Prepare command word for CMD FIFO */ |
935 | dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0); |
936 | if (!spi_get_csgpiod(spi, idx: 0)) |
937 | dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0)); |
938 | |
939 | if (list_is_last(list: &dspi->cur_transfer->transfer_list, |
940 | head: &dspi->cur_msg->transfers)) { |
941 | /* Leave PCS activated after last transfer when |
942 | * cs_change is set. |
943 | */ |
944 | if (transfer->cs_change) |
945 | dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; |
946 | } else { |
947 | /* Keep PCS active between transfers in same message |
948 | * when cs_change is not set, and de-activate PCS |
949 | * between transfers in the same message when |
950 | * cs_change is set. |
951 | */ |
952 | if (!transfer->cs_change) |
953 | dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; |
954 | } |
955 | |
956 | dspi->tx = transfer->tx_buf; |
957 | dspi->rx = transfer->rx_buf; |
958 | dspi->len = transfer->len; |
959 | dspi->progress = 0; |
960 | |
961 | regmap_update_bits(map: dspi->regmap, SPI_MCR, |
962 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, |
963 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); |
964 | |
965 | spi_take_timestamp_pre(ctlr: dspi->ctlr, xfer: dspi->cur_transfer, |
966 | progress: dspi->progress, irqs_off: !dspi->irq); |
967 | |
968 | if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { |
969 | status = dspi_dma_xfer(dspi); |
970 | } else { |
971 | dspi_fifo_write(dspi); |
972 | |
973 | if (dspi->irq) { |
974 | wait_for_completion(&dspi->xfer_done); |
975 | reinit_completion(x: &dspi->xfer_done); |
976 | } else { |
977 | do { |
978 | status = dspi_poll(dspi); |
979 | } while (status == -EINPROGRESS); |
980 | } |
981 | } |
982 | if (status) |
983 | break; |
984 | |
985 | spi_transfer_delay_exec(t: transfer); |
986 | |
987 | if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT)) |
988 | dspi_deassert_cs(spi, cs: &cs); |
989 | } |
990 | |
991 | message->status = status; |
992 | spi_finalize_current_message(ctlr); |
993 | |
994 | return status; |
995 | } |
996 | |
997 | static int dspi_setup(struct spi_device *spi) |
998 | { |
999 | struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr: spi->controller); |
1000 | u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz); |
1001 | unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; |
1002 | u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4); |
1003 | u32 cs_sck_delay = 0, sck_cs_delay = 0; |
1004 | struct fsl_dspi_platform_data *pdata; |
1005 | unsigned char pasc = 0, asc = 0; |
1006 | struct chip_data *chip; |
1007 | unsigned long clkrate; |
1008 | bool cs = true; |
1009 | |
1010 | /* Only alloc on first setup */ |
1011 | chip = spi_get_ctldata(spi); |
1012 | if (chip == NULL) { |
1013 | chip = kzalloc(size: sizeof(struct chip_data), GFP_KERNEL); |
1014 | if (!chip) |
1015 | return -ENOMEM; |
1016 | } |
1017 | |
1018 | pdata = dev_get_platdata(dev: &dspi->pdev->dev); |
1019 | |
1020 | if (!pdata) { |
1021 | of_property_read_u32(np: spi->dev.of_node, propname: "fsl,spi-cs-sck-delay" , |
1022 | out_value: &cs_sck_delay); |
1023 | |
1024 | of_property_read_u32(np: spi->dev.of_node, propname: "fsl,spi-sck-cs-delay" , |
1025 | out_value: &sck_cs_delay); |
1026 | } else { |
1027 | cs_sck_delay = pdata->cs_sck_delay; |
1028 | sck_cs_delay = pdata->sck_cs_delay; |
1029 | } |
1030 | |
1031 | /* Since tCSC and tASC apply to continuous transfers too, avoid SCK |
1032 | * glitches of half a cycle by never allowing tCSC + tASC to go below |
1033 | * half a SCK period. |
1034 | */ |
1035 | if (cs_sck_delay < quarter_period_ns) |
1036 | cs_sck_delay = quarter_period_ns; |
1037 | if (sck_cs_delay < quarter_period_ns) |
1038 | sck_cs_delay = quarter_period_ns; |
1039 | |
1040 | dev_dbg(&spi->dev, |
1041 | "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n" , |
1042 | cs_sck_delay, sck_cs_delay); |
1043 | |
1044 | clkrate = clk_get_rate(clk: dspi->clk); |
1045 | hz_to_spi_baud(pbr: &pbr, br: &br, speed_hz: spi->max_speed_hz, clkrate); |
1046 | |
1047 | /* Set PCS to SCK delay scale values */ |
1048 | ns_delay_scale(psc: &pcssck, sc: &cssck, delay_ns: cs_sck_delay, clkrate); |
1049 | |
1050 | /* Set After SCK delay scale values */ |
1051 | ns_delay_scale(psc: &pasc, sc: &asc, delay_ns: sck_cs_delay, clkrate); |
1052 | |
1053 | chip->ctar_val = 0; |
1054 | if (spi->mode & SPI_CPOL) |
1055 | chip->ctar_val |= SPI_CTAR_CPOL; |
1056 | if (spi->mode & SPI_CPHA) |
1057 | chip->ctar_val |= SPI_CTAR_CPHA; |
1058 | |
1059 | if (!spi_controller_is_target(ctlr: dspi->ctlr)) { |
1060 | chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) | |
1061 | SPI_CTAR_CSSCK(cssck) | |
1062 | SPI_CTAR_PASC(pasc) | |
1063 | SPI_CTAR_ASC(asc) | |
1064 | SPI_CTAR_PBR(pbr) | |
1065 | SPI_CTAR_BR(br); |
1066 | |
1067 | if (spi->mode & SPI_LSB_FIRST) |
1068 | chip->ctar_val |= SPI_CTAR_LSBFE; |
1069 | } |
1070 | |
1071 | gpiod_direction_output(desc: spi_get_csgpiod(spi, idx: 0), value: false); |
1072 | dspi_deassert_cs(spi, cs: &cs); |
1073 | |
1074 | spi_set_ctldata(spi, state: chip); |
1075 | |
1076 | return 0; |
1077 | } |
1078 | |
1079 | static void dspi_cleanup(struct spi_device *spi) |
1080 | { |
1081 | struct chip_data *chip = spi_get_ctldata(spi); |
1082 | |
1083 | dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n" , |
1084 | spi->controller->bus_num, spi_get_chipselect(spi, 0)); |
1085 | |
1086 | kfree(objp: chip); |
1087 | } |
1088 | |
1089 | static const struct of_device_id fsl_dspi_dt_ids[] = { |
1090 | { |
1091 | .compatible = "fsl,vf610-dspi" , |
1092 | .data = &devtype_data[VF610], |
1093 | }, { |
1094 | .compatible = "fsl,ls1021a-v1.0-dspi" , |
1095 | .data = &devtype_data[LS1021A], |
1096 | }, { |
1097 | .compatible = "fsl,ls1012a-dspi" , |
1098 | .data = &devtype_data[LS1012A], |
1099 | }, { |
1100 | .compatible = "fsl,ls1028a-dspi" , |
1101 | .data = &devtype_data[LS1028A], |
1102 | }, { |
1103 | .compatible = "fsl,ls1043a-dspi" , |
1104 | .data = &devtype_data[LS1043A], |
1105 | }, { |
1106 | .compatible = "fsl,ls1046a-dspi" , |
1107 | .data = &devtype_data[LS1046A], |
1108 | }, { |
1109 | .compatible = "fsl,ls2080a-dspi" , |
1110 | .data = &devtype_data[LS2080A], |
1111 | }, { |
1112 | .compatible = "fsl,ls2085a-dspi" , |
1113 | .data = &devtype_data[LS2085A], |
1114 | }, { |
1115 | .compatible = "fsl,lx2160a-dspi" , |
1116 | .data = &devtype_data[LX2160A], |
1117 | }, |
1118 | { /* sentinel */ } |
1119 | }; |
1120 | MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); |
1121 | |
1122 | #ifdef CONFIG_PM_SLEEP |
1123 | static int dspi_suspend(struct device *dev) |
1124 | { |
1125 | struct fsl_dspi *dspi = dev_get_drvdata(dev); |
1126 | |
1127 | if (dspi->irq) |
1128 | disable_irq(irq: dspi->irq); |
1129 | spi_controller_suspend(ctlr: dspi->ctlr); |
1130 | clk_disable_unprepare(clk: dspi->clk); |
1131 | |
1132 | pinctrl_pm_select_sleep_state(dev); |
1133 | |
1134 | return 0; |
1135 | } |
1136 | |
1137 | static int dspi_resume(struct device *dev) |
1138 | { |
1139 | struct fsl_dspi *dspi = dev_get_drvdata(dev); |
1140 | int ret; |
1141 | |
1142 | pinctrl_pm_select_default_state(dev); |
1143 | |
1144 | ret = clk_prepare_enable(clk: dspi->clk); |
1145 | if (ret) |
1146 | return ret; |
1147 | spi_controller_resume(ctlr: dspi->ctlr); |
1148 | if (dspi->irq) |
1149 | enable_irq(irq: dspi->irq); |
1150 | |
1151 | return 0; |
1152 | } |
1153 | #endif /* CONFIG_PM_SLEEP */ |
1154 | |
1155 | static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); |
1156 | |
1157 | static const struct regmap_range dspi_volatile_ranges[] = { |
1158 | regmap_reg_range(SPI_MCR, SPI_TCR), |
1159 | regmap_reg_range(SPI_SR, SPI_SR), |
1160 | regmap_reg_range(SPI_PUSHR, SPI_RXFR3), |
1161 | }; |
1162 | |
1163 | static const struct regmap_access_table dspi_volatile_table = { |
1164 | .yes_ranges = dspi_volatile_ranges, |
1165 | .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges), |
1166 | }; |
1167 | |
1168 | static const struct regmap_config dspi_regmap_config = { |
1169 | .reg_bits = 32, |
1170 | .val_bits = 32, |
1171 | .reg_stride = 4, |
1172 | .max_register = 0x88, |
1173 | .volatile_table = &dspi_volatile_table, |
1174 | }; |
1175 | |
1176 | static const struct regmap_range dspi_xspi_volatile_ranges[] = { |
1177 | regmap_reg_range(SPI_MCR, SPI_TCR), |
1178 | regmap_reg_range(SPI_SR, SPI_SR), |
1179 | regmap_reg_range(SPI_PUSHR, SPI_RXFR3), |
1180 | regmap_reg_range(SPI_SREX, SPI_SREX), |
1181 | }; |
1182 | |
1183 | static const struct regmap_access_table dspi_xspi_volatile_table = { |
1184 | .yes_ranges = dspi_xspi_volatile_ranges, |
1185 | .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges), |
1186 | }; |
1187 | |
1188 | static const struct regmap_config dspi_xspi_regmap_config[] = { |
1189 | { |
1190 | .reg_bits = 32, |
1191 | .val_bits = 32, |
1192 | .reg_stride = 4, |
1193 | .max_register = 0x13c, |
1194 | .volatile_table = &dspi_xspi_volatile_table, |
1195 | }, |
1196 | { |
1197 | .name = "pushr" , |
1198 | .reg_bits = 16, |
1199 | .val_bits = 16, |
1200 | .reg_stride = 2, |
1201 | .max_register = 0x2, |
1202 | }, |
1203 | }; |
1204 | |
1205 | static int dspi_init(struct fsl_dspi *dspi) |
1206 | { |
1207 | unsigned int mcr; |
1208 | |
1209 | /* Set idle states for all chip select signals to high */ |
1210 | mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0)); |
1211 | |
1212 | if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) |
1213 | mcr |= SPI_MCR_XSPI; |
1214 | if (!spi_controller_is_target(ctlr: dspi->ctlr)) |
1215 | mcr |= SPI_MCR_HOST; |
1216 | |
1217 | regmap_write(map: dspi->regmap, SPI_MCR, val: mcr); |
1218 | regmap_write(map: dspi->regmap, SPI_SR, SPI_SR_CLEAR); |
1219 | |
1220 | switch (dspi->devtype_data->trans_mode) { |
1221 | case DSPI_XSPI_MODE: |
1222 | regmap_write(map: dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE); |
1223 | break; |
1224 | case DSPI_DMA_MODE: |
1225 | regmap_write(map: dspi->regmap, SPI_RSER, |
1226 | SPI_RSER_TFFFE | SPI_RSER_TFFFD | |
1227 | SPI_RSER_RFDFE | SPI_RSER_RFDFD); |
1228 | break; |
1229 | default: |
1230 | dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n" , |
1231 | dspi->devtype_data->trans_mode); |
1232 | return -EINVAL; |
1233 | } |
1234 | |
1235 | return 0; |
1236 | } |
1237 | |
1238 | static int dspi_target_abort(struct spi_controller *host) |
1239 | { |
1240 | struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr: host); |
1241 | |
1242 | /* |
1243 | * Terminate all pending DMA transactions for the SPI working |
1244 | * in TARGET mode. |
1245 | */ |
1246 | if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { |
1247 | dmaengine_terminate_sync(chan: dspi->dma->chan_rx); |
1248 | dmaengine_terminate_sync(chan: dspi->dma->chan_tx); |
1249 | } |
1250 | |
1251 | /* Clear the internal DSPI RX and TX FIFO buffers */ |
1252 | regmap_update_bits(map: dspi->regmap, SPI_MCR, |
1253 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, |
1254 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); |
1255 | |
1256 | return 0; |
1257 | } |
1258 | |
1259 | static int dspi_probe(struct platform_device *pdev) |
1260 | { |
1261 | struct device_node *np = pdev->dev.of_node; |
1262 | const struct regmap_config *regmap_config; |
1263 | struct fsl_dspi_platform_data *pdata; |
1264 | struct spi_controller *ctlr; |
1265 | int ret, cs_num, bus_num = -1; |
1266 | struct fsl_dspi *dspi; |
1267 | struct resource *res; |
1268 | void __iomem *base; |
1269 | bool big_endian; |
1270 | |
1271 | dspi = devm_kzalloc(dev: &pdev->dev, size: sizeof(*dspi), GFP_KERNEL); |
1272 | if (!dspi) |
1273 | return -ENOMEM; |
1274 | |
1275 | ctlr = spi_alloc_host(dev: &pdev->dev, size: 0); |
1276 | if (!ctlr) |
1277 | return -ENOMEM; |
1278 | |
1279 | spi_controller_set_devdata(ctlr, data: dspi); |
1280 | platform_set_drvdata(pdev, data: dspi); |
1281 | |
1282 | dspi->pdev = pdev; |
1283 | dspi->ctlr = ctlr; |
1284 | |
1285 | ctlr->setup = dspi_setup; |
1286 | ctlr->transfer_one_message = dspi_transfer_one_message; |
1287 | ctlr->dev.of_node = pdev->dev.of_node; |
1288 | |
1289 | ctlr->cleanup = dspi_cleanup; |
1290 | ctlr->target_abort = dspi_target_abort; |
1291 | ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; |
1292 | ctlr->use_gpio_descriptors = true; |
1293 | |
1294 | pdata = dev_get_platdata(dev: &pdev->dev); |
1295 | if (pdata) { |
1296 | ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num; |
1297 | ctlr->bus_num = pdata->bus_num; |
1298 | |
1299 | /* Only Coldfire uses platform data */ |
1300 | dspi->devtype_data = &devtype_data[MCF5441X]; |
1301 | big_endian = true; |
1302 | } else { |
1303 | |
1304 | ret = of_property_read_u32(np, propname: "spi-num-chipselects" , out_value: &cs_num); |
1305 | if (ret < 0) { |
1306 | dev_err(&pdev->dev, "can't get spi-num-chipselects\n" ); |
1307 | goto out_ctlr_put; |
1308 | } |
1309 | ctlr->num_chipselect = ctlr->max_native_cs = cs_num; |
1310 | |
1311 | of_property_read_u32(np, propname: "bus-num" , out_value: &bus_num); |
1312 | ctlr->bus_num = bus_num; |
1313 | |
1314 | if (of_property_read_bool(np, propname: "spi-slave" )) |
1315 | ctlr->target = true; |
1316 | |
1317 | dspi->devtype_data = of_device_get_match_data(dev: &pdev->dev); |
1318 | if (!dspi->devtype_data) { |
1319 | dev_err(&pdev->dev, "can't get devtype_data\n" ); |
1320 | ret = -EFAULT; |
1321 | goto out_ctlr_put; |
1322 | } |
1323 | |
1324 | big_endian = of_device_is_big_endian(device: np); |
1325 | } |
1326 | if (big_endian) { |
1327 | dspi->pushr_cmd = 0; |
1328 | dspi->pushr_tx = 2; |
1329 | } else { |
1330 | dspi->pushr_cmd = 2; |
1331 | dspi->pushr_tx = 0; |
1332 | } |
1333 | |
1334 | if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) |
1335 | ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); |
1336 | else |
1337 | ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); |
1338 | |
1339 | base = devm_platform_get_and_ioremap_resource(pdev, index: 0, res: &res); |
1340 | if (IS_ERR(ptr: base)) { |
1341 | ret = PTR_ERR(ptr: base); |
1342 | goto out_ctlr_put; |
1343 | } |
1344 | |
1345 | if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) |
1346 | regmap_config = &dspi_xspi_regmap_config[0]; |
1347 | else |
1348 | regmap_config = &dspi_regmap_config; |
1349 | dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config); |
1350 | if (IS_ERR(ptr: dspi->regmap)) { |
1351 | dev_err(&pdev->dev, "failed to init regmap: %ld\n" , |
1352 | PTR_ERR(dspi->regmap)); |
1353 | ret = PTR_ERR(ptr: dspi->regmap); |
1354 | goto out_ctlr_put; |
1355 | } |
1356 | |
1357 | if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) { |
1358 | dspi->regmap_pushr = devm_regmap_init_mmio( |
1359 | &pdev->dev, base + SPI_PUSHR, |
1360 | &dspi_xspi_regmap_config[1]); |
1361 | if (IS_ERR(ptr: dspi->regmap_pushr)) { |
1362 | dev_err(&pdev->dev, |
1363 | "failed to init pushr regmap: %ld\n" , |
1364 | PTR_ERR(dspi->regmap_pushr)); |
1365 | ret = PTR_ERR(ptr: dspi->regmap_pushr); |
1366 | goto out_ctlr_put; |
1367 | } |
1368 | } |
1369 | |
1370 | dspi->clk = devm_clk_get_enabled(dev: &pdev->dev, id: "dspi" ); |
1371 | if (IS_ERR(ptr: dspi->clk)) { |
1372 | ret = PTR_ERR(ptr: dspi->clk); |
1373 | dev_err(&pdev->dev, "unable to get clock\n" ); |
1374 | goto out_ctlr_put; |
1375 | } |
1376 | |
1377 | ret = dspi_init(dspi); |
1378 | if (ret) |
1379 | goto out_ctlr_put; |
1380 | |
1381 | dspi->irq = platform_get_irq(pdev, 0); |
1382 | if (dspi->irq <= 0) { |
1383 | dev_info(&pdev->dev, |
1384 | "can't get platform irq, using poll mode\n" ); |
1385 | dspi->irq = 0; |
1386 | goto poll_mode; |
1387 | } |
1388 | |
1389 | init_completion(x: &dspi->xfer_done); |
1390 | |
1391 | ret = request_threaded_irq(irq: dspi->irq, handler: dspi_interrupt, NULL, |
1392 | IRQF_SHARED, name: pdev->name, dev: dspi); |
1393 | if (ret < 0) { |
1394 | dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n" ); |
1395 | goto out_ctlr_put; |
1396 | } |
1397 | |
1398 | poll_mode: |
1399 | |
1400 | if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { |
1401 | ret = dspi_request_dma(dspi, phy_addr: res->start); |
1402 | if (ret < 0) { |
1403 | dev_err(&pdev->dev, "can't get dma channels\n" ); |
1404 | goto out_free_irq; |
1405 | } |
1406 | } |
1407 | |
1408 | ctlr->max_speed_hz = |
1409 | clk_get_rate(clk: dspi->clk) / dspi->devtype_data->max_clock_factor; |
1410 | |
1411 | if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE) |
1412 | ctlr->ptp_sts_supported = true; |
1413 | |
1414 | ret = spi_register_controller(ctlr); |
1415 | if (ret != 0) { |
1416 | dev_err(&pdev->dev, "Problem registering DSPI ctlr\n" ); |
1417 | goto out_release_dma; |
1418 | } |
1419 | |
1420 | return ret; |
1421 | |
1422 | out_release_dma: |
1423 | dspi_release_dma(dspi); |
1424 | out_free_irq: |
1425 | if (dspi->irq) |
1426 | free_irq(dspi->irq, dspi); |
1427 | out_ctlr_put: |
1428 | spi_controller_put(ctlr); |
1429 | |
1430 | return ret; |
1431 | } |
1432 | |
1433 | static void dspi_remove(struct platform_device *pdev) |
1434 | { |
1435 | struct fsl_dspi *dspi = platform_get_drvdata(pdev); |
1436 | |
1437 | /* Disconnect from the SPI framework */ |
1438 | spi_unregister_controller(ctlr: dspi->ctlr); |
1439 | |
1440 | /* Disable RX and TX */ |
1441 | regmap_update_bits(map: dspi->regmap, SPI_MCR, |
1442 | SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF, |
1443 | SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF); |
1444 | |
1445 | /* Stop Running */ |
1446 | regmap_update_bits(map: dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); |
1447 | |
1448 | dspi_release_dma(dspi); |
1449 | if (dspi->irq) |
1450 | free_irq(dspi->irq, dspi); |
1451 | } |
1452 | |
1453 | static void dspi_shutdown(struct platform_device *pdev) |
1454 | { |
1455 | dspi_remove(pdev); |
1456 | } |
1457 | |
1458 | static struct platform_driver fsl_dspi_driver = { |
1459 | .driver.name = DRIVER_NAME, |
1460 | .driver.of_match_table = fsl_dspi_dt_ids, |
1461 | .driver.owner = THIS_MODULE, |
1462 | .driver.pm = &dspi_pm, |
1463 | .probe = dspi_probe, |
1464 | .remove_new = dspi_remove, |
1465 | .shutdown = dspi_shutdown, |
1466 | }; |
1467 | module_platform_driver(fsl_dspi_driver); |
1468 | |
1469 | MODULE_DESCRIPTION("Freescale DSPI Controller Driver" ); |
1470 | MODULE_LICENSE("GPL" ); |
1471 | MODULE_ALIAS("platform:" DRIVER_NAME); |
1472 | |