1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2015 MediaTek Inc. |
4 | * Author: Leilk Liu <leilk.liu@mediatek.com> |
5 | */ |
6 | |
7 | #include <linux/clk.h> |
8 | #include <linux/device.h> |
9 | #include <linux/err.h> |
10 | #include <linux/interrupt.h> |
11 | #include <linux/io.h> |
12 | #include <linux/ioport.h> |
13 | #include <linux/module.h> |
14 | #include <linux/of.h> |
15 | #include <linux/gpio/consumer.h> |
16 | #include <linux/pinctrl/consumer.h> |
17 | #include <linux/platform_device.h> |
18 | #include <linux/platform_data/spi-mt65xx.h> |
19 | #include <linux/pm_runtime.h> |
20 | #include <linux/spi/spi.h> |
21 | #include <linux/spi/spi-mem.h> |
22 | #include <linux/dma-mapping.h> |
23 | |
24 | #define SPI_CFG0_REG 0x0000 |
25 | #define SPI_CFG1_REG 0x0004 |
26 | #define SPI_TX_SRC_REG 0x0008 |
27 | #define SPI_RX_DST_REG 0x000c |
28 | #define SPI_TX_DATA_REG 0x0010 |
29 | #define SPI_RX_DATA_REG 0x0014 |
30 | #define SPI_CMD_REG 0x0018 |
31 | #define SPI_STATUS0_REG 0x001c |
32 | #define SPI_PAD_SEL_REG 0x0024 |
33 | #define SPI_CFG2_REG 0x0028 |
34 | #define SPI_TX_SRC_REG_64 0x002c |
35 | #define SPI_RX_DST_REG_64 0x0030 |
36 | #define SPI_CFG3_IPM_REG 0x0040 |
37 | |
38 | #define SPI_CFG0_SCK_HIGH_OFFSET 0 |
39 | #define SPI_CFG0_SCK_LOW_OFFSET 8 |
40 | #define SPI_CFG0_CS_HOLD_OFFSET 16 |
41 | #define SPI_CFG0_CS_SETUP_OFFSET 24 |
42 | #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0 |
43 | #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16 |
44 | |
45 | #define SPI_CFG1_CS_IDLE_OFFSET 0 |
46 | #define SPI_CFG1_PACKET_LOOP_OFFSET 8 |
47 | #define SPI_CFG1_PACKET_LENGTH_OFFSET 16 |
48 | #define SPI_CFG1_GET_TICK_DLY_OFFSET 29 |
49 | #define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30 |
50 | |
51 | #define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000 |
52 | #define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000 |
53 | |
54 | #define SPI_CFG1_CS_IDLE_MASK 0xff |
55 | #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 |
56 | #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 |
57 | #define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16) |
58 | #define SPI_CFG2_SCK_HIGH_OFFSET 0 |
59 | #define SPI_CFG2_SCK_LOW_OFFSET 16 |
60 | |
61 | #define SPI_CMD_ACT BIT(0) |
62 | #define SPI_CMD_RESUME BIT(1) |
63 | #define SPI_CMD_RST BIT(2) |
64 | #define SPI_CMD_PAUSE_EN BIT(4) |
65 | #define SPI_CMD_DEASSERT BIT(5) |
66 | #define SPI_CMD_SAMPLE_SEL BIT(6) |
67 | #define SPI_CMD_CS_POL BIT(7) |
68 | #define SPI_CMD_CPHA BIT(8) |
69 | #define SPI_CMD_CPOL BIT(9) |
70 | #define SPI_CMD_RX_DMA BIT(10) |
71 | #define SPI_CMD_TX_DMA BIT(11) |
72 | #define SPI_CMD_TXMSBF BIT(12) |
73 | #define SPI_CMD_RXMSBF BIT(13) |
74 | #define SPI_CMD_RX_ENDIAN BIT(14) |
75 | #define SPI_CMD_TX_ENDIAN BIT(15) |
76 | #define SPI_CMD_FINISH_IE BIT(16) |
77 | #define SPI_CMD_PAUSE_IE BIT(17) |
78 | #define SPI_CMD_IPM_NONIDLE_MODE BIT(19) |
79 | #define SPI_CMD_IPM_SPIM_LOOP BIT(21) |
80 | #define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22 |
81 | |
82 | #define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22) |
83 | |
84 | #define PIN_MODE_CFG(x) ((x) / 2) |
85 | |
86 | #define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2) |
87 | #define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3) |
88 | #define SPI_CFG3_IPM_XMODE_EN BIT(4) |
89 | #define SPI_CFG3_IPM_NODATA_FLAG BIT(5) |
90 | #define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8 |
91 | #define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12 |
92 | |
93 | #define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0) |
94 | #define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8) |
95 | #define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12) |
96 | |
97 | #define MT8173_SPI_MAX_PAD_SEL 3 |
98 | |
99 | #define MTK_SPI_PAUSE_INT_STATUS 0x2 |
100 | |
101 | #define MTK_SPI_MAX_FIFO_SIZE 32U |
102 | #define MTK_SPI_PACKET_SIZE 1024 |
103 | #define MTK_SPI_IPM_PACKET_SIZE SZ_64K |
104 | #define MTK_SPI_IPM_PACKET_LOOP SZ_256 |
105 | |
106 | #define MTK_SPI_IDLE 0 |
107 | #define MTK_SPI_PAUSED 1 |
108 | |
109 | #define MTK_SPI_32BITS_MASK (0xffffffff) |
110 | |
111 | #define DMA_ADDR_EXT_BITS (36) |
112 | #define DMA_ADDR_DEF_BITS (32) |
113 | |
114 | /** |
115 | * struct mtk_spi_compatible - device data structure |
116 | * @need_pad_sel: Enable pad (pins) selection in SPI controller |
117 | * @must_tx: Must explicitly send dummy TX bytes to do RX only transfer |
118 | * @enhance_timing: Enable adjusting cfg register to enhance time accuracy |
119 | * @dma_ext: DMA address extension supported |
120 | * @no_need_unprepare: Don't unprepare the SPI clk during runtime |
121 | * @ipm_design: Adjust/extend registers to support IPM design IP features |
122 | */ |
123 | struct mtk_spi_compatible { |
124 | bool need_pad_sel; |
125 | bool must_tx; |
126 | bool enhance_timing; |
127 | bool dma_ext; |
128 | bool no_need_unprepare; |
129 | bool ipm_design; |
130 | }; |
131 | |
132 | /** |
133 | * struct mtk_spi - SPI driver instance |
134 | * @base: Start address of the SPI controller registers |
135 | * @state: SPI controller state |
136 | * @pad_num: Number of pad_sel entries |
137 | * @pad_sel: Groups of pins to select |
138 | * @parent_clk: Parent of sel_clk |
139 | * @sel_clk: SPI host mux clock |
140 | * @spi_clk: Peripheral clock |
141 | * @spi_hclk: AHB bus clock |
142 | * @cur_transfer: Currently processed SPI transfer |
143 | * @xfer_len: Number of bytes to transfer |
144 | * @num_xfered: Number of transferred bytes |
145 | * @tx_sgl: TX transfer scatterlist |
146 | * @rx_sgl: RX transfer scatterlist |
147 | * @tx_sgl_len: Size of TX DMA transfer |
148 | * @rx_sgl_len: Size of RX DMA transfer |
149 | * @dev_comp: Device data structure |
150 | * @spi_clk_hz: Current SPI clock in Hz |
151 | * @spimem_done: SPI-MEM operation completion |
152 | * @use_spimem: Enables SPI-MEM |
153 | * @dev: Device pointer |
154 | * @tx_dma: DMA start for SPI-MEM TX |
155 | * @rx_dma: DMA start for SPI-MEM RX |
156 | */ |
157 | struct mtk_spi { |
158 | void __iomem *base; |
159 | u32 state; |
160 | int pad_num; |
161 | u32 *pad_sel; |
162 | struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk; |
163 | struct spi_transfer *cur_transfer; |
164 | u32 xfer_len; |
165 | u32 num_xfered; |
166 | struct scatterlist *tx_sgl, *rx_sgl; |
167 | u32 tx_sgl_len, rx_sgl_len; |
168 | const struct mtk_spi_compatible *dev_comp; |
169 | u32 spi_clk_hz; |
170 | struct completion spimem_done; |
171 | bool use_spimem; |
172 | struct device *dev; |
173 | dma_addr_t tx_dma; |
174 | dma_addr_t rx_dma; |
175 | }; |
176 | |
177 | static const struct mtk_spi_compatible mtk_common_compat; |
178 | |
179 | static const struct mtk_spi_compatible mt2712_compat = { |
180 | .must_tx = true, |
181 | }; |
182 | |
183 | static const struct mtk_spi_compatible mtk_ipm_compat = { |
184 | .enhance_timing = true, |
185 | .dma_ext = true, |
186 | .ipm_design = true, |
187 | }; |
188 | |
189 | static const struct mtk_spi_compatible mt6765_compat = { |
190 | .need_pad_sel = true, |
191 | .must_tx = true, |
192 | .enhance_timing = true, |
193 | .dma_ext = true, |
194 | }; |
195 | |
196 | static const struct mtk_spi_compatible mt7622_compat = { |
197 | .must_tx = true, |
198 | .enhance_timing = true, |
199 | }; |
200 | |
201 | static const struct mtk_spi_compatible mt8173_compat = { |
202 | .need_pad_sel = true, |
203 | .must_tx = true, |
204 | }; |
205 | |
206 | static const struct mtk_spi_compatible mt8183_compat = { |
207 | .need_pad_sel = true, |
208 | .must_tx = true, |
209 | .enhance_timing = true, |
210 | }; |
211 | |
212 | static const struct mtk_spi_compatible mt6893_compat = { |
213 | .need_pad_sel = true, |
214 | .must_tx = true, |
215 | .enhance_timing = true, |
216 | .dma_ext = true, |
217 | .no_need_unprepare = true, |
218 | }; |
219 | |
220 | /* |
221 | * A piece of default chip info unless the platform |
222 | * supplies it. |
223 | */ |
224 | static const struct mtk_chip_config mtk_default_chip_info = { |
225 | .sample_sel = 0, |
226 | .tick_delay = 0, |
227 | }; |
228 | |
229 | static const struct of_device_id mtk_spi_of_match[] = { |
230 | { .compatible = "mediatek,spi-ipm" , |
231 | .data = (void *)&mtk_ipm_compat, |
232 | }, |
233 | { .compatible = "mediatek,mt2701-spi" , |
234 | .data = (void *)&mtk_common_compat, |
235 | }, |
236 | { .compatible = "mediatek,mt2712-spi" , |
237 | .data = (void *)&mt2712_compat, |
238 | }, |
239 | { .compatible = "mediatek,mt6589-spi" , |
240 | .data = (void *)&mtk_common_compat, |
241 | }, |
242 | { .compatible = "mediatek,mt6765-spi" , |
243 | .data = (void *)&mt6765_compat, |
244 | }, |
245 | { .compatible = "mediatek,mt7622-spi" , |
246 | .data = (void *)&mt7622_compat, |
247 | }, |
248 | { .compatible = "mediatek,mt7629-spi" , |
249 | .data = (void *)&mt7622_compat, |
250 | }, |
251 | { .compatible = "mediatek,mt8135-spi" , |
252 | .data = (void *)&mtk_common_compat, |
253 | }, |
254 | { .compatible = "mediatek,mt8173-spi" , |
255 | .data = (void *)&mt8173_compat, |
256 | }, |
257 | { .compatible = "mediatek,mt8183-spi" , |
258 | .data = (void *)&mt8183_compat, |
259 | }, |
260 | { .compatible = "mediatek,mt8192-spi" , |
261 | .data = (void *)&mt6765_compat, |
262 | }, |
263 | { .compatible = "mediatek,mt6893-spi" , |
264 | .data = (void *)&mt6893_compat, |
265 | }, |
266 | {} |
267 | }; |
268 | MODULE_DEVICE_TABLE(of, mtk_spi_of_match); |
269 | |
270 | static void mtk_spi_reset(struct mtk_spi *mdata) |
271 | { |
272 | u32 reg_val; |
273 | |
274 | /* set the software reset bit in SPI_CMD_REG. */ |
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
282 | } |
283 | |
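/*
 * Translate the per-device CS setup/hold/inactive delays (struct spi_delay)
 * into SPI clock ticks and program them into SPI_CFG0 and SPI_CFG1. On
 * enhance_timing controllers the setup/hold fields are 16 bits wide,
 * otherwise 8 bits.
 */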
284 | static int mtk_spi_set_hw_cs_timing(struct spi_device *spi) |
285 | { |
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
287 | struct spi_delay *cs_setup = &spi->cs_setup; |
288 | struct spi_delay *cs_hold = &spi->cs_hold; |
289 | struct spi_delay *cs_inactive = &spi->cs_inactive; |
290 | u32 setup, hold, inactive; |
291 | u32 reg_val; |
292 | int delay; |
293 | |
	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
311 | if (mdata->dev_comp->enhance_timing) { |
312 | if (hold) { |
313 | hold = min_t(u32, hold, 0x10000); |
314 | reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); |
315 | reg_val |= (((hold - 1) & 0xffff) |
316 | << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); |
317 | } |
318 | if (setup) { |
319 | setup = min_t(u32, setup, 0x10000); |
320 | reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); |
321 | reg_val |= (((setup - 1) & 0xffff) |
322 | << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); |
323 | } |
324 | } else { |
325 | if (hold) { |
326 | hold = min_t(u32, hold, 0x100); |
327 | reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET); |
328 | reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); |
329 | } |
330 | if (setup) { |
331 | setup = min_t(u32, setup, 0x100); |
332 | reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET); |
333 | reg_val |= (((setup - 1) & 0xff) |
334 | << SPI_CFG0_CS_SETUP_OFFSET); |
335 | } |
336 | } |
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
346 | } |
347 | |
348 | return 0; |
349 | } |
350 | |
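/*
 * Apply the per-device configuration to the controller: clock phase and
 * polarity, bit order, bus endianness, CS polarity, sample/pad selection
 * and the get-tick delay, then program the CS timing.
 */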
351 | static int mtk_spi_hw_init(struct spi_controller *host, |
352 | struct spi_device *spi) |
353 | { |
354 | u16 cpha, cpol; |
355 | u32 reg_val; |
356 | struct mtk_chip_config *chip_config = spi->controller_data; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
363 | if (mdata->dev_comp->ipm_design) { |
364 | /* SPI transfer without idle time until packet length done */ |
365 | reg_val |= SPI_CMD_IPM_NONIDLE_MODE; |
366 | if (spi->mode & SPI_LOOP) |
367 | reg_val |= SPI_CMD_IPM_SPIM_LOOP; |
368 | else |
369 | reg_val &= ~SPI_CMD_IPM_SPIM_LOOP; |
370 | } |
371 | |
372 | if (cpha) |
373 | reg_val |= SPI_CMD_CPHA; |
374 | else |
375 | reg_val &= ~SPI_CMD_CPHA; |
376 | if (cpol) |
377 | reg_val |= SPI_CMD_CPOL; |
378 | else |
379 | reg_val &= ~SPI_CMD_CPOL; |
380 | |
381 | /* set the mlsbx and mlsbtx */ |
382 | if (spi->mode & SPI_LSB_FIRST) { |
383 | reg_val &= ~SPI_CMD_TXMSBF; |
384 | reg_val &= ~SPI_CMD_RXMSBF; |
385 | } else { |
386 | reg_val |= SPI_CMD_TXMSBF; |
387 | reg_val |= SPI_CMD_RXMSBF; |
388 | } |
389 | |
390 | /* set the tx/rx endian */ |
391 | #ifdef __LITTLE_ENDIAN |
392 | reg_val &= ~SPI_CMD_TX_ENDIAN; |
393 | reg_val &= ~SPI_CMD_RX_ENDIAN; |
394 | #else |
395 | reg_val |= SPI_CMD_TX_ENDIAN; |
396 | reg_val |= SPI_CMD_RX_ENDIAN; |
397 | #endif |
398 | |
399 | if (mdata->dev_comp->enhance_timing) { |
400 | /* set CS polarity */ |
401 | if (spi->mode & SPI_CS_HIGH) |
402 | reg_val |= SPI_CMD_CS_POL; |
403 | else |
404 | reg_val &= ~SPI_CMD_CS_POL; |
405 | |
406 | if (chip_config->sample_sel) |
407 | reg_val |= SPI_CMD_SAMPLE_SEL; |
408 | else |
409 | reg_val &= ~SPI_CMD_SAMPLE_SEL; |
410 | } |
411 | |
412 | /* set finish and pause interrupt always enable */ |
413 | reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE; |
414 | |
415 | /* disable dma mode */ |
416 | reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA); |
417 | |
418 | /* disable deassert mode */ |
419 | reg_val &= ~SPI_CMD_DEASSERT; |
420 | |
	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi_get_chipselect(spi, 0)],
		       mdata->base + SPI_PAD_SEL_REG);
427 | |
428 | /* tick delay */ |
429 | if (mdata->dev_comp->enhance_timing) { |
430 | if (mdata->dev_comp->ipm_design) { |
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
449 | } |
450 | |
451 | /* set hw cs timing */ |
452 | mtk_spi_set_hw_cs_timing(spi); |
453 | return 0; |
454 | } |
455 | |
456 | static int mtk_spi_prepare_message(struct spi_controller *host, |
457 | struct spi_message *msg) |
458 | { |
	return mtk_spi_hw_init(host, msg->spi);
460 | } |
461 | |
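/*
 * Chip select is driven through the controller's pause mode: pause is
 * enabled while CS is active so CS stays asserted between transfers, and
 * cleared (followed by a controller reset) when CS is released.
 */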
462 | static void mtk_spi_set_cs(struct spi_device *spi, bool enable) |
463 | { |
464 | u32 reg_val; |
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
477 | mdata->state = MTK_SPI_IDLE; |
478 | mtk_spi_reset(mdata); |
479 | } |
480 | } |
481 | |
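/*
 * Derive the clock divider from the source clock and the requested speed,
 * then program the SCK high/low half-periods (SPI_CFG2 on enhance_timing
 * controllers, SPI_CFG0 otherwise).
 */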
482 | static void mtk_spi_prepare_transfer(struct spi_controller *host, |
483 | u32 speed_hz) |
484 | { |
485 | u32 div, sck_time, reg_val; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
487 | |
488 | if (speed_hz < mdata->spi_clk_hz / 2) |
489 | div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz); |
490 | else |
491 | div = 1; |
492 | |
493 | sck_time = (div + 1) / 2; |
494 | |
495 | if (mdata->dev_comp->enhance_timing) { |
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
512 | } |
513 | } |
514 | |
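/*
 * Split the current transfer length into a packet length and a packet
 * loop count and program both into SPI_CFG1.
 */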
515 | static void mtk_spi_setup_packet(struct spi_controller *host) |
516 | { |
517 | u32 packet_size, packet_loop, reg_val; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
519 | |
520 | if (mdata->dev_comp->ipm_design) |
521 | packet_size = min_t(u32, |
522 | mdata->xfer_len, |
523 | MTK_SPI_IPM_PACKET_SIZE); |
524 | else |
525 | packet_size = min_t(u32, |
526 | mdata->xfer_len, |
527 | MTK_SPI_PACKET_SIZE); |
528 | |
529 | packet_loop = mdata->xfer_len / packet_size; |
530 | |
	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
540 | } |
541 | |
542 | static void mtk_spi_enable_transfer(struct spi_controller *host) |
543 | { |
544 | u32 cmd; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
553 | } |
554 | |
555 | static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len) |
556 | { |
557 | u32 mult_delta = 0; |
558 | |
559 | if (mdata->dev_comp->ipm_design) { |
560 | if (xfer_len > MTK_SPI_IPM_PACKET_SIZE) |
561 | mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE; |
562 | } else { |
563 | if (xfer_len > MTK_SPI_PACKET_SIZE) |
564 | mult_delta = xfer_len % MTK_SPI_PACKET_SIZE; |
565 | } |
566 | |
567 | return mult_delta; |
568 | } |
569 | |
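/*
 * Pick the length of the next DMA chunk: lengths above one packet are
 * trimmed down to a whole multiple of the packet size, and the remainder
 * is left in tx_sgl_len/rx_sgl_len for a follow-up transfer.
 */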
570 | static void mtk_spi_update_mdata_len(struct spi_controller *host) |
571 | { |
572 | int mult_delta; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
593 | mdata->xfer_len = mdata->rx_sgl_len - mult_delta; |
594 | mdata->rx_sgl_len = mult_delta; |
595 | } |
596 | } |
597 | |
598 | static void mtk_spi_setup_dma_addr(struct spi_controller *host, |
599 | struct spi_transfer *xfer) |
600 | { |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
620 | #endif |
621 | } |
622 | } |
623 | |
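/*
 * PIO path: write TX data into the data register and start the transfer;
 * the interrupt handler drains RX and refills TX in FIFO-sized chunks
 * until the whole spi_transfer is done.
 */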
624 | static int mtk_spi_fifo_transfer(struct spi_controller *host, |
625 | struct spi_device *spi, |
626 | struct spi_transfer *xfer) |
627 | { |
628 | int cnt, remainder; |
629 | u32 reg_val; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(host, xfer->speed_hz);
	mtk_spi_setup_packet(host);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
646 | } |
647 | } |
648 | |
649 | mtk_spi_enable_transfer(host); |
650 | |
651 | return 1; |
652 | } |
653 | |
654 | static int mtk_spi_dma_transfer(struct spi_controller *host, |
655 | struct spi_device *spi, |
656 | struct spi_transfer *xfer) |
657 | { |
658 | int cmd; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(host, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);
676 | |
677 | if (xfer->tx_buf) |
678 | mdata->tx_sgl = xfer->tx_sg.sgl; |
679 | if (xfer->rx_buf) |
680 | mdata->rx_sgl = xfer->rx_sg.sgl; |
681 | |
682 | if (mdata->tx_sgl) { |
683 | xfer->tx_dma = sg_dma_address(mdata->tx_sgl); |
684 | mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl); |
685 | } |
686 | if (mdata->rx_sgl) { |
687 | xfer->rx_dma = sg_dma_address(mdata->rx_sgl); |
688 | mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl); |
689 | } |
690 | |
691 | mtk_spi_update_mdata_len(host); |
692 | mtk_spi_setup_packet(host); |
693 | mtk_spi_setup_dma_addr(host, xfer); |
694 | mtk_spi_enable_transfer(host); |
695 | |
696 | return 1; |
697 | } |
698 | |
699 | static int mtk_spi_transfer_one(struct spi_controller *host, |
700 | struct spi_device *spi, |
701 | struct spi_transfer *xfer) |
702 | { |
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
704 | u32 reg_val = 0; |
705 | |
706 | /* prepare xfer direction and duplex mode */ |
707 | if (mdata->dev_comp->ipm_design) { |
708 | if (!xfer->tx_buf || !xfer->rx_buf) { |
709 | reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN; |
710 | if (xfer->rx_buf) |
711 | reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR; |
712 | } |
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
714 | } |
715 | |
716 | if (host->can_dma(host, spi, xfer)) |
717 | return mtk_spi_dma_transfer(host, spi, xfer); |
718 | else |
719 | return mtk_spi_fifo_transfer(host, spi, xfer); |
720 | } |
721 | |
722 | static bool mtk_spi_can_dma(struct spi_controller *host, |
723 | struct spi_device *spi, |
724 | struct spi_transfer *xfer) |
725 | { |
726 | /* Buffers for DMA transactions must be 4-byte aligned */ |
727 | return (xfer->len > MTK_SPI_MAX_FIFO_SIZE && |
728 | (unsigned long)xfer->tx_buf % 4 == 0 && |
729 | (unsigned long)xfer->rx_buf % 4 == 0); |
730 | } |
731 | |
732 | static int mtk_spi_setup(struct spi_device *spi) |
733 | { |
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi_get_csgpiod(spi, 0))
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);
742 | |
743 | return 0; |
744 | } |
745 | |
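/*
 * Interrupt handler: completes pending spi-mem operations, services FIFO
 * (PIO) transfers by draining RX and refilling TX chunk by chunk, and for
 * DMA transfers advances the TX/RX scatterlists until all data has moved,
 * then finalizes the current transfer.
 */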
746 | static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) |
747 | { |
748 | u32 cmd, reg_val, cnt, remainder, len; |
749 | struct spi_controller *host = dev_id; |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
754 | if (reg_val & MTK_SPI_PAUSE_INT_STATUS) |
755 | mdata->state = MTK_SPI_PAUSED; |
756 | else |
757 | mdata->state = MTK_SPI_IDLE; |
758 | |
759 | /* SPI-MEM ops */ |
760 | if (mdata->use_spimem) { |
761 | complete(&mdata->spimem_done); |
762 | return IRQ_HANDLED; |
763 | } |
764 | |
765 | if (!host->can_dma(host, NULL, trans)) { |
766 | if (trans->rx_buf) { |
767 | cnt = mdata->xfer_len / 4; |
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
773 | memcpy(trans->rx_buf + |
774 | mdata->num_xfered + |
775 | (cnt * 4), |
776 | ®_val, |
777 | remainder); |
778 | } |
779 | } |
780 | |
781 | mdata->num_xfered += mdata->xfer_len; |
782 | if (mdata->num_xfered == trans->len) { |
			spi_finalize_current_transfer(host);
784 | return IRQ_HANDLED; |
785 | } |
786 | |
787 | len = trans->len - mdata->num_xfered; |
788 | mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); |
789 | mtk_spi_setup_packet(host); |
790 | |
791 | if (trans->tx_buf) { |
792 | cnt = mdata->xfer_len / 4; |
			iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
				      trans->tx_buf + mdata->num_xfered, cnt);

			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = 0;
				memcpy(&reg_val,
				       trans->tx_buf + (cnt * 4) + mdata->num_xfered,
				       remainder);
				writel(reg_val, mdata->base + SPI_TX_DATA_REG);
803 | } |
804 | } |
805 | |
806 | mtk_spi_enable_transfer(host); |
807 | |
808 | return IRQ_HANDLED; |
809 | } |
810 | |
811 | if (mdata->tx_sgl) |
812 | trans->tx_dma += mdata->xfer_len; |
813 | if (mdata->rx_sgl) |
814 | trans->rx_dma += mdata->xfer_len; |
815 | |
816 | if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) { |
817 | mdata->tx_sgl = sg_next(mdata->tx_sgl); |
818 | if (mdata->tx_sgl) { |
819 | trans->tx_dma = sg_dma_address(mdata->tx_sgl); |
820 | mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl); |
821 | } |
822 | } |
823 | if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) { |
824 | mdata->rx_sgl = sg_next(mdata->rx_sgl); |
825 | if (mdata->rx_sgl) { |
826 | trans->rx_dma = sg_dma_address(mdata->rx_sgl); |
827 | mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl); |
828 | } |
829 | } |
830 | |
831 | if (!mdata->tx_sgl && !mdata->rx_sgl) { |
832 | /* spi disable dma */ |
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(host);
839 | return IRQ_HANDLED; |
840 | } |
841 | |
842 | mtk_spi_update_mdata_len(host); |
843 | mtk_spi_setup_packet(host); |
	mtk_spi_setup_dma_addr(host, trans);
845 | mtk_spi_enable_transfer(host); |
846 | |
847 | return IRQ_HANDLED; |
848 | } |
849 | |
850 | static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem, |
851 | struct spi_mem_op *op) |
852 | { |
853 | int opcode_len; |
854 | |
855 | if (op->data.dir != SPI_MEM_NO_DATA) { |
856 | opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes; |
857 | if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) { |
858 | op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len; |
859 | /* force data buffer dma-aligned. */ |
860 | op->data.nbytes -= op->data.nbytes % 4; |
861 | } |
862 | } |
863 | |
864 | return 0; |
865 | } |
866 | |
867 | static bool mtk_spi_mem_supports_op(struct spi_mem *mem, |
868 | const struct spi_mem_op *op) |
869 | { |
870 | if (!spi_mem_default_supports_op(mem, op)) |
871 | return false; |
872 | |
873 | if (op->addr.nbytes && op->dummy.nbytes && |
874 | op->addr.buswidth != op->dummy.buswidth) |
875 | return false; |
876 | |
877 | if (op->addr.nbytes + op->dummy.nbytes > 16) |
878 | return false; |
879 | |
880 | if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) { |
881 | if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE > |
882 | MTK_SPI_IPM_PACKET_LOOP || |
883 | op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0) |
884 | return false; |
885 | } |
886 | |
887 | return true; |
888 | } |
889 | |
890 | static void mtk_spi_mem_setup_dma_xfer(struct spi_controller *host, |
891 | const struct spi_mem_op *op) |
892 | { |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
910 | #endif |
911 | } |
912 | } |
913 | |
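/*
 * Wait for the spi-mem completion signalled from the interrupt handler;
 * the timeout scales with the size of the operation.
 */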
914 | static int mtk_spi_transfer_wait(struct spi_mem *mem, |
915 | const struct spi_mem_op *op) |
916 | { |
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
918 | /* |
919 | * For each byte we wait for 8 cycles of the SPI clock. |
	 * Since the speed is given in Hz and we want milliseconds,
	 * the scaling factor is 8 * 1000.
922 | */ |
923 | u64 ms = 8000LL; |
924 | |
925 | if (op->data.dir == SPI_MEM_NO_DATA) |
926 | ms *= 32; /* prevent we may get 0 for short transfers. */ |
927 | else |
928 | ms *= op->data.nbytes; |
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* 1s tolerance */

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
938 | return -ETIMEDOUT; |
939 | } |
940 | |
941 | return 0; |
942 | } |
943 | |
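/*
 * Execute a spi-mem operation: opcode, address and dummy bytes (plus any
 * TX payload) are packed into a DMA-safe bounce buffer, SPI_CFG3_IPM is
 * programmed with the command/address layout and bus width, and the
 * transfer runs in DMA mode until the completion fires or times out.
 */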
944 | static int mtk_spi_mem_exec_op(struct spi_mem *mem, |
945 | const struct spi_mem_op *op) |
946 | { |
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
948 | u32 reg_val, nio, tx_size; |
949 | char *tx_tmp_buf, *rx_tmp_buf; |
950 | int ret = 0; |
951 | |
952 | mdata->use_spimem = true; |
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->controller, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->controller, mem->spi->max_speed_hz);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
960 | /* opcode byte len */ |
961 | reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK; |
962 | reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET; |
963 | |
964 | /* addr & dummy byte len */ |
965 | reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK; |
966 | if (op->addr.nbytes || op->dummy.nbytes) |
967 | reg_val |= (op->addr.nbytes + op->dummy.nbytes) << |
968 | SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET; |
969 | |
970 | /* data byte len */ |
971 | if (op->data.dir == SPI_MEM_NO_DATA) { |
972 | reg_val |= SPI_CFG3_IPM_NODATA_FLAG; |
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->controller);
978 | } |
979 | |
980 | if (op->addr.nbytes || op->dummy.nbytes) { |
981 | if (op->addr.buswidth == 1 || op->dummy.buswidth == 1) |
982 | reg_val |= SPI_CFG3_IPM_XMODE_EN; |
983 | else |
984 | reg_val &= ~SPI_CFG3_IPM_XMODE_EN; |
985 | } |
986 | |
987 | if (op->addr.buswidth == 2 || |
988 | op->dummy.buswidth == 2 || |
989 | op->data.buswidth == 2) |
990 | nio = 2; |
991 | else if (op->addr.buswidth == 4 || |
992 | op->dummy.buswidth == 4 || |
993 | op->data.buswidth == 4) |
994 | nio = 4; |
995 | else |
996 | nio = 1; |
997 | |
998 | reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK; |
999 | reg_val |= PIN_MODE_CFG(nio); |
1000 | |
1001 | reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN; |
1002 | if (op->data.dir == SPI_MEM_DATA_IN) |
1003 | reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR; |
1004 | else |
1005 | reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR; |
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
1007 | |
1008 | tx_size = 1 + op->addr.nbytes + op->dummy.nbytes; |
1009 | if (op->data.dir == SPI_MEM_DATA_OUT) |
1010 | tx_size += op->data.nbytes; |
1011 | |
1012 | tx_size = max_t(u32, tx_size, 32); |
1013 | |
	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
1015 | if (!tx_tmp_buf) { |
1016 | mdata->use_spimem = false; |
1017 | return -ENOMEM; |
1018 | } |
1019 | |
1020 | tx_tmp_buf[0] = op->cmd.opcode; |
1021 | |
1022 | if (op->addr.nbytes) { |
1023 | int i; |
1024 | |
1025 | for (i = 0; i < op->addr.nbytes; i++) |
1026 | tx_tmp_buf[i + 1] = op->addr.val >> |
1027 | (8 * (op->addr.nbytes - i - 1)); |
1028 | } |
1029 | |
1030 | if (op->dummy.nbytes) |
1031 | memset(tx_tmp_buf + op->addr.nbytes + 1, |
1032 | 0xff, |
1033 | op->dummy.nbytes); |
1034 | |
1035 | if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) |
1036 | memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1, |
1037 | op->data.buf.out, |
1038 | op->data.nbytes); |
1039 | |
1040 | mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf, |
1041 | tx_size, DMA_TO_DEVICE); |
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
1050 | GFP_KERNEL | GFP_DMA); |
1051 | if (!rx_tmp_buf) { |
1052 | ret = -ENOMEM; |
1053 | goto unmap_tx_dma; |
1054 | } |
1055 | } else { |
1056 | rx_tmp_buf = op->data.buf.in; |
1057 | } |
1058 | |
1059 | mdata->rx_dma = dma_map_single(mdata->dev, |
1060 | rx_tmp_buf, |
1061 | op->data.nbytes, |
1062 | DMA_FROM_DEVICE); |
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
1064 | ret = -ENOMEM; |
1065 | goto kfree_rx_tmp_buf; |
1066 | } |
1067 | } |
1068 | |
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);
1074 | |
	mtk_spi_mem_setup_dma_xfer(mem->spi->controller, op);

	mtk_spi_enable_transfer(mem->spi->controller);
1078 | |
1079 | /* Wait for the interrupt. */ |
1080 | ret = mtk_spi_transfer_wait(mem, op); |
1081 | if (ret) |
1082 | goto unmap_rx_dma; |
1083 | |
1084 | /* spi disable dma */ |
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);
1090 | |
1091 | unmap_rx_dma: |
1092 | if (op->data.dir == SPI_MEM_DATA_IN) { |
1093 | dma_unmap_single(mdata->dev, mdata->rx_dma, |
1094 | op->data.nbytes, DMA_FROM_DEVICE); |
1095 | if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) |
1096 | memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes); |
1097 | } |
1098 | kfree_rx_tmp_buf: |
1099 | if (op->data.dir == SPI_MEM_DATA_IN && |
1100 | !IS_ALIGNED((size_t)op->data.buf.in, 4)) |
		kfree(rx_tmp_buf);
1102 | unmap_tx_dma: |
1103 | dma_unmap_single(mdata->dev, mdata->tx_dma, |
1104 | tx_size, DMA_TO_DEVICE); |
1105 | err_exit: |
	kfree(tx_tmp_buf);
1107 | mdata->use_spimem = false; |
1108 | |
1109 | return ret; |
1110 | } |
1111 | |
1112 | static const struct spi_controller_mem_ops mtk_spi_mem_ops = { |
1113 | .adjust_op_size = mtk_spi_mem_adjust_op_size, |
1114 | .supports_op = mtk_spi_mem_supports_op, |
1115 | .exec_op = mtk_spi_mem_exec_op, |
1116 | }; |
1117 | |
1118 | static int mtk_spi_probe(struct platform_device *pdev) |
1119 | { |
1120 | struct device *dev = &pdev->dev; |
1121 | struct spi_controller *host; |
1122 | struct mtk_spi *mdata; |
1123 | int i, irq, ret, addr_bits; |
1124 | |
	host = devm_spi_alloc_host(dev, sizeof(*mdata));
	if (!host)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi host\n");
1128 | |
1129 | host->auto_runtime_pm = true; |
1130 | host->dev.of_node = dev->of_node; |
1131 | host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; |
1132 | |
1133 | host->set_cs = mtk_spi_set_cs; |
1134 | host->prepare_message = mtk_spi_prepare_message; |
1135 | host->transfer_one = mtk_spi_transfer_one; |
1136 | host->can_dma = mtk_spi_can_dma; |
1137 | host->setup = mtk_spi_setup; |
1138 | host->set_cs_timing = mtk_spi_set_hw_cs_timing; |
1139 | host->use_gpio_descriptors = true; |
1140 | |
	mdata = spi_controller_get_devdata(host);
1142 | mdata->dev_comp = device_get_match_data(dev); |
1143 | |
1144 | if (mdata->dev_comp->enhance_timing) |
1145 | host->mode_bits |= SPI_CS_HIGH; |
1146 | |
1147 | if (mdata->dev_comp->must_tx) |
1148 | host->flags = SPI_CONTROLLER_MUST_TX; |
1149 | if (mdata->dev_comp->ipm_design) |
1150 | host->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL | |
1151 | SPI_RX_QUAD | SPI_TX_QUAD; |
1152 | |
1153 | if (mdata->dev_comp->ipm_design) { |
1154 | mdata->dev = dev; |
1155 | host->mem_ops = &mtk_spi_mem_ops; |
		init_completion(&mdata->spimem_done);
1157 | } |
1158 | |
1159 | if (mdata->dev_comp->need_pad_sel) { |
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
							     "mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
					     "No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
1179 | } |
1180 | } |
1181 | |
	platform_set_drvdata(pdev, host);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);
1186 | |
1187 | irq = platform_get_irq(pdev, 0); |
1188 | if (irq < 0) |
1189 | return irq; |
1190 | |
1191 | if (!dev->dma_mask) |
1192 | dev->dma_mask = &dev->coherent_dma_mask; |
1193 | |
1194 | if (mdata->dev_comp->ipm_design) |
1195 | dma_set_max_seg_size(dev, SZ_16M); |
1196 | else |
1197 | dma_set_max_seg_size(dev, SZ_256K); |
1198 | |
	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");
1219 | |
	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
1238 | } |
1239 | |
1240 | if (mdata->dev_comp->need_pad_sel) { |
		if (mdata->pad_num != host->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
					     "pad_num does not match num_chipselect(%d != %d)\n",
					     mdata->pad_num, host->num_chipselect);

		if (!host->cs_gpiods && host->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
					     "cs_gpios not specified and num_chipselect > 1\n");
1249 | } |
1250 | |
1251 | if (mdata->dev_comp->dma_ext) |
1252 | addr_bits = DMA_ADDR_EXT_BITS; |
1253 | else |
1254 | addr_bits = DMA_ADDR_DEF_BITS; |
1255 | ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits)); |
1256 | if (ret) |
1257 | dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n" , |
1258 | addr_bits, ret); |
1259 | |
1260 | ret = devm_request_irq(dev, irq, handler: mtk_spi_interrupt, |
1261 | IRQF_TRIGGER_NONE, devname: dev_name(dev), dev_id: host); |
1262 | if (ret) |
1263 | return dev_err_probe(dev, err: ret, fmt: "failed to register irq\n" ); |
1264 | |
1265 | pm_runtime_enable(dev); |
1266 | |
	ret = devm_spi_register_controller(dev, host);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register host\n");
1271 | } |
1272 | |
1273 | return 0; |
1274 | } |
1275 | |
1276 | static void mtk_spi_remove(struct platform_device *pdev) |
1277 | { |
1278 | struct spi_controller *host = platform_get_drvdata(pdev); |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
		complete(&mdata->spimem_done);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
1288 | } else { |
1289 | /* |
1290 | * If pm runtime resume failed, clks are disabled and |
1291 | * unprepared. So don't access the hardware and skip clk |
1292 | * unpreparing. |
1293 | */ |
1294 | mtk_spi_reset(mdata); |
1295 | |
1296 | if (mdata->dev_comp->no_need_unprepare) { |
			clk_unprepare(mdata->spi_clk);
			clk_unprepare(mdata->spi_hclk);
1299 | } |
1300 | } |
1301 | |
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
1304 | } |
1305 | |
1306 | #ifdef CONFIG_PM_SLEEP |
1307 | static int mtk_spi_suspend(struct device *dev) |
1308 | { |
1309 | int ret; |
1310 | struct spi_controller *host = dev_get_drvdata(dev); |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
1320 | } |
1321 | |
1322 | pinctrl_pm_select_sleep_state(dev); |
1323 | |
1324 | return 0; |
1325 | } |
1326 | |
1327 | static int mtk_spi_resume(struct device *dev) |
1328 | { |
1329 | int ret; |
1330 | struct spi_controller *host = dev_get_drvdata(dev); |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	pinctrl_pm_select_default_state(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_controller_resume(host);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
1354 | } |
1355 | |
1356 | return ret; |
1357 | } |
1358 | #endif /* CONFIG_PM_SLEEP */ |
1359 | |
1360 | #ifdef CONFIG_PM |
1361 | static int mtk_spi_runtime_suspend(struct device *dev) |
1362 | { |
1363 | struct spi_controller *host = dev_get_drvdata(dev); |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
1372 | } |
1373 | |
1374 | return 0; |
1375 | } |
1376 | |
1377 | static int mtk_spi_runtime_resume(struct device *dev) |
1378 | { |
1379 | struct spi_controller *host = dev_get_drvdata(dev); |
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
1406 | return ret; |
1407 | } |
1408 | } |
1409 | |
1410 | return 0; |
1411 | } |
1412 | #endif /* CONFIG_PM */ |
1413 | |
1414 | static const struct dev_pm_ops mtk_spi_pm = { |
1415 | SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume) |
1416 | SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend, |
1417 | mtk_spi_runtime_resume, NULL) |
1418 | }; |
1419 | |
1420 | static struct platform_driver mtk_spi_driver = { |
1421 | .driver = { |
1422 | .name = "mtk-spi" , |
1423 | .pm = &mtk_spi_pm, |
1424 | .of_match_table = mtk_spi_of_match, |
1425 | }, |
1426 | .probe = mtk_spi_probe, |
1427 | .remove_new = mtk_spi_remove, |
1428 | }; |
1429 | |
1430 | module_platform_driver(mtk_spi_driver); |
1431 | |
1432 | MODULE_DESCRIPTION("MTK SPI Controller driver" ); |
1433 | MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>" ); |
1434 | MODULE_LICENSE("GPL v2" ); |
1435 | MODULE_ALIAS("platform:mtk-spi" ); |
1436 | |