// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA		0x224
#define CPHA			BIT(0)

#define SE_SPI_LOOPBACK		0x22c
#define LOOPBACK_ENABLE		0x1
#define NORMAL_MODE		0x0
#define LOOPBACK_MSK		GENMASK(1, 0)

#define SE_SPI_CPOL		0x230
#define CPOL			BIT(2)

#define SE_SPI_DEMUX_OUTPUT_INV	0x24c
#define CS_DEMUX_OUTPUT_INV_MSK	GENMASK(3, 0)

#define SE_SPI_DEMUX_SEL	0x250
#define CS_DEMUX_OUTPUT_SEL	GENMASK(3, 0)

#define SE_SPI_TRANS_CFG	0x25c
#define CS_TOGGLE		BIT(1)

#define SE_SPI_WORD_LEN		0x268
#define WORD_LEN_MSK		GENMASK(9, 0)
#define MIN_WORD_LEN		4

#define SE_SPI_TX_TRANS_LEN	0x26c
#define SE_SPI_RX_TRANS_LEN	0x270
#define TRANS_LEN_MSK		GENMASK(23, 0)

#define SE_SPI_PRE_POST_CMD_DLY	0x274

#define SE_SPI_DELAY_COUNTERS	0x278
#define SPI_INTER_WORDS_DELAY_MSK	GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK		GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT		10

#define SE_SPI_SLAVE_EN		(0x2BC)
#define SPI_SLAVE_EN		BIT(0)

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY		1
#define SPI_RX_ONLY		2
#define SPI_TX_RX		7
#define SPI_CS_ASSERT		8
#define SPI_CS_DEASSERT		9
#define SPI_SCK_ONLY		10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY	BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define FRAGMENTATION		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

#define GSI_LOOPBACK_EN		BIT(0)
#define GSI_CS_TOGGLE		BIT(3)
#define GSI_CPHA		BIT(4)
#define GSI_CPOL		BIT(5)

struct spi_geni_master {
	struct geni_se se;
	struct device *dev;
	u32 tx_fifo_depth;
	u32 fifo_width_bits;
	u32 tx_wm;
	u32 last_mode;
	unsigned long cur_speed_hz;
	unsigned long cur_sclk_hz;
	unsigned int cur_bits_per_word;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	const struct spi_transfer *cur_xfer;
	struct completion cs_done;
	struct completion cancel_done;
	struct completion abort_done;
	struct completion tx_reset_done;
	struct completion rx_reset_done;
	unsigned int oversampling;
	spinlock_t lock;
	int irq;
	bool cs_flag;
	bool abort_failed;
	struct dma_chan *tx;
	struct dma_chan *rx;
	int cur_xfer_mode;
};

static void spi_slv_setup(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;

	writel(SPI_SLAVE_EN, se->base + SE_SPI_SLAVE_EN);
	writel(GENI_IO_MUX_0_EN, se->base + GENI_OUTPUT_CTRL);
	writel(START_TRIGGER, se->base + SE_GENI_CFG_SEQ_START);
	dev_dbg(mas->dev, "spi slave setup done\n");
}

static int get_spi_clk_cfg(unsigned int speed_hz,
			struct spi_geni_master *mas,
			unsigned int *clk_idx,
			unsigned int *clk_div)
{
	unsigned long sclk_freq;
	unsigned int actual_hz;
	int ret;

	ret = geni_se_clk_freq_match(&mas->se,
				speed_hz * mas->oversampling,
				clk_idx, &sclk_freq, false);
	if (ret) {
		dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
							ret, speed_hz);
		return ret;
	}

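	/*
	 * The source clock is divided down by clk_div and again by the
	 * oversampling factor to produce the SPI bit clock, so round the
	 * divider up to keep the actual rate at or below the requested one.
	 */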
	*clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
	actual_hz = sclk_freq / (mas->oversampling * *clk_div);

	dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
				actual_hz, sclk_freq, *clk_idx, *clk_div);
	ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
	if (ret)
		dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
	else
		mas->cur_sclk_hz = sclk_freq;

	return ret;
}

static void handle_se_timeout(struct spi_controller *spi,
			      struct spi_message *msg)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	unsigned long time_left;
	struct geni_se *se = &mas->se;
	const struct spi_transfer *xfer;

	spin_lock_irq(&mas->lock);
	if (mas->cur_xfer_mode == GENI_SE_FIFO)
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);

	xfer = mas->cur_xfer;
	mas->cur_xfer = NULL;

	if (spi->target) {
		/*
		 * Skip the CMD cancel sequence since the SPI target
		 * doesn't support it.
		 */
		spin_unlock_irq(&mas->lock);
		goto reset_if_dma;
	}

	reinit_completion(&mas->cancel_done);
	geni_se_cancel_m_cmd(se);
	spin_unlock_irq(&mas->lock);

	time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
	if (time_left)
		goto reset_if_dma;

	spin_lock_irq(&mas->lock);
	reinit_completion(&mas->abort_done);
	geni_se_abort_m_cmd(se);
	spin_unlock_irq(&mas->lock);

	time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
	if (!time_left) {
		dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");

		/*
		 * No need for a lock since SPI core has a lock and we never
		 * access this from an interrupt.
		 */
		mas->abort_failed = true;
	}

reset_if_dma:
	if (mas->cur_xfer_mode == GENI_SE_DMA) {
		if (xfer) {
			if (xfer->tx_buf) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->tx_reset_done);
				writel(1, se->base + SE_DMA_TX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
				if (!time_left)
					dev_err(mas->dev, "DMA TX RESET failed\n");
			}
			if (xfer->rx_buf) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->rx_reset_done);
				writel(1, se->base + SE_DMA_RX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
				if (!time_left)
					dev_err(mas->dev, "DMA RX RESET failed\n");
			}
		} else {
			/*
			 * This can happen if a timeout happened and we had to wait
			 * for the lock in this function because the ISR was holding
			 * the lock and handling transfer completion at that time.
			 */
			dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
		}
	}
}

static void handle_gpi_timeout(struct spi_controller *spi, struct spi_message *msg)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);

	dmaengine_terminate_sync(mas->tx);
	dmaengine_terminate_sync(mas->rx);
}

static void spi_geni_handle_err(struct spi_controller *spi, struct spi_message *msg)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);

	switch (mas->cur_xfer_mode) {
	case GENI_SE_FIFO:
	case GENI_SE_DMA:
		handle_se_timeout(spi, msg);
		break;
	case GENI_GPI_DMA:
		handle_gpi_timeout(spi, msg);
		break;
	default:
		dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode);
	}
}

static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	u32 m_irq, m_irq_en;

	if (!mas->abort_failed)
		return false;

	/*
	 * The only known case where a transfer times out and then a cancel
	 * times out then an abort times out is if something is blocking our
	 * interrupt handler from running. Avoid starting any new transfers
	 * until that sorts itself out.
	 */
	spin_lock_irq(&mas->lock);
	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
	m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
	spin_unlock_irq(&mas->lock);

	if (m_irq & m_irq_en) {
		dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
			m_irq & m_irq_en);
		return true;
	}

	/*
	 * If we're here the problem resolved itself so no need to check more
	 * on future transfers.
	 */
	mas->abort_failed = false;

	return false;
}

static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
	struct spi_controller *spi = dev_get_drvdata(mas->dev);
	struct geni_se *se = &mas->se;
	unsigned long time_left;

	if (!(slv->mode & SPI_CS_HIGH))
		set_flag = !set_flag;

	if (set_flag == mas->cs_flag)
		return;

	pm_runtime_get_sync(mas->dev);

	if (spi_geni_is_abort_still_pending(mas)) {
		dev_err(mas->dev, "Can't set chip select\n");
		goto exit;
	}

	spin_lock_irq(&mas->lock);
	if (mas->cur_xfer) {
		dev_err(mas->dev, "Can't set CS when prev xfer running\n");
		spin_unlock_irq(&mas->lock);
		goto exit;
	}

	mas->cs_flag = set_flag;
	/* set xfer_mode to FIFO to complete cs_done in isr */
	mas->cur_xfer_mode = GENI_SE_FIFO;
	geni_se_select_mode(se, mas->cur_xfer_mode);

	reinit_completion(&mas->cs_done);
	if (set_flag)
		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
	else
		geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
	spin_unlock_irq(&mas->lock);

	time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
	if (!time_left) {
		dev_warn(mas->dev, "Timeout setting chip select\n");
		handle_se_timeout(spi, NULL);
	}

exit:
	pm_runtime_put(mas->dev);
}

static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
					unsigned int bits_per_word)
{
	unsigned int pack_words;
	bool msb_first = !(mode & SPI_LSB_FIRST);
	struct geni_se *se = &mas->se;
	u32 word_len;

	/*
	 * If the FIFO width isn't an integer multiple of bits_per_word, set
	 * the packing to be 1 SPI word per FIFO word.
	 */
	if (!(mas->fifo_width_bits % bits_per_word))
		pack_words = mas->fifo_width_bits / bits_per_word;
	else
		pack_words = 1;
	geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
								true, true);
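	/* The hardware encodes the word length as an offset from the 4-bit minimum. */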
	word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
	writel(word_len, se->base + SE_SPI_WORD_LEN);
}

static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
					unsigned long clk_hz)
{
	u32 clk_sel, m_clk_cfg, idx, div;
	struct geni_se *se = &mas->se;
	int ret;

	if (clk_hz == mas->cur_speed_hz)
		return 0;

	ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
		return ret;
	}

	/*
	 * The SPI core clock gets configured with the requested frequency
	 * or the closest frequency it can achieve. For that reason the
	 * requested frequency is stored in cur_speed_hz and used for
	 * subsequent transfers instead of calling clk_get_rate().
	 */
	mas->cur_speed_hz = clk_hz;

	clk_sel = idx & CLK_SEL_MSK;
	m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
	writel(clk_sel, se->base + SE_GENI_CLK_SEL);
	writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);

	/* Set BW quota for CPU as driver supports FIFO mode only. */
	se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
	ret = geni_icc_set_bw(se);
	if (ret)
		return ret;

	return 0;
}

static int setup_fifo_params(struct spi_device *spi_slv,
					struct spi_controller *spi)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
	u32 demux_sel;

	if (mas->last_mode != spi_slv->mode) {
		if (spi_slv->mode & SPI_LOOP)
			loopback_cfg = LOOPBACK_ENABLE;

		if (spi_slv->mode & SPI_CPOL)
			cpol = CPOL;

		if (spi_slv->mode & SPI_CPHA)
			cpha = CPHA;

		if (spi_slv->mode & SPI_CS_HIGH)
			demux_output_inv = BIT(spi_get_chipselect(spi_slv, 0));

		demux_sel = spi_get_chipselect(spi_slv, 0);
		mas->cur_bits_per_word = spi_slv->bits_per_word;

		spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
		writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
		writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
		writel(cpha, se->base + SE_SPI_CPHA);
		writel(cpol, se->base + SE_SPI_CPOL);
		writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);

		mas->last_mode = spi_slv->mode;
	}

	return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}

static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
	struct spi_controller *spi = cb;

	spi->cur_msg->status = -EIO;
	if (result->result != DMA_TRANS_NOERROR) {
		dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
		spi_finalize_current_transfer(spi);
		return;
	}

	if (!result->residue) {
		spi->cur_msg->status = 0;
		dev_dbg(&spi->dev, "DMA txn completed\n");
	} else {
		dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
	}

	spi_finalize_current_transfer(spi);
}

static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
			  struct spi_device *spi_slv, struct spi_controller *spi)
{
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct dma_slave_config config = {};
	struct gpi_spi_config peripheral = {};
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int ret;

	config.peripheral_config = &peripheral;
	config.peripheral_size = sizeof(peripheral);
	peripheral.set_config = true;

	if (xfer->bits_per_word != mas->cur_bits_per_word ||
	    xfer->speed_hz != mas->cur_speed_hz) {
		mas->cur_bits_per_word = xfer->bits_per_word;
		mas->cur_speed_hz = xfer->speed_hz;
	}

	if (xfer->tx_buf && xfer->rx_buf) {
		peripheral.cmd = SPI_DUPLEX;
	} else if (xfer->tx_buf) {
		peripheral.cmd = SPI_TX;
		peripheral.rx_len = 0;
	} else if (xfer->rx_buf) {
		peripheral.cmd = SPI_RX;
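		/*
		 * rx_len is expressed in SPI words rather than bytes; for
		 * word sizes that aren't a multiple of 4 bits, each word is
		 * assumed to occupy the next whole number of bytes.
		 */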
		if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
			peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
		} else {
			int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;

			peripheral.rx_len = (xfer->len / bytes_per_word);
		}
	}

	peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
	peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
	peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
	peripheral.cs = spi_get_chipselect(spi_slv, 0);
	peripheral.pack_en = true;
	peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;

	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
			      &peripheral.clk_src, &peripheral.clk_div);
	if (ret) {
		dev_err(mas->dev, "Err in get_spi_clk_cfg(): %d\n", ret);
		return ret;
	}

	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
			peripheral.fragmentation = FRAGMENTATION;
	}

	if (peripheral.cmd & SPI_RX) {
		dmaengine_slave_config(mas->rx, &config);
		rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
						  DMA_DEV_TO_MEM, flags);
		if (!rx_desc) {
			dev_err(mas->dev, "Err setting up rx desc\n");
			return -EIO;
		}
	}

	/*
	 * Always prepare the TX descriptor, even for RX-only transfers with
	 * a NULL tx_buf; the GSI spec requires TX to be set up.
	 */
	dmaengine_slave_config(mas->tx, &config);
	tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
					  DMA_MEM_TO_DEV, flags);
	if (!tx_desc) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		return -EIO;
	}

	tx_desc->callback_result = spi_gsi_callback_result;
	tx_desc->callback_param = spi;

	if (peripheral.cmd & SPI_RX)
		dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);

	if (peripheral.cmd & SPI_RX)
		dma_async_issue_pending(mas->rx);

	dma_async_issue_pending(mas->tx);
	return 1;
}

static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
				 struct spi_geni_master *mas)
{
	u32 len;

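	/* The SE counts transfer lengths in SPI words, not bytes. */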
	if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
		len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
	else
		len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
	len &= TRANS_LEN_MSK;

	return len;
}

static bool geni_can_dma(struct spi_controller *ctlr,
			 struct spi_device *slv, struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
	u32 len, fifo_size;

	if (mas->cur_xfer_mode == GENI_GPI_DMA)
		return true;

	/* Set SE DMA mode for SPI target. */
	if (ctlr->target)
		return true;

	len = get_xfer_len_in_words(xfer, mas);
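	/* FIFO capacity measured in SPI words at the current word size. */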
	fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;

	return len > fifo_size;
}

static int spi_geni_prepare_message(struct spi_controller *spi,
				    struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	switch (mas->cur_xfer_mode) {
	case GENI_SE_FIFO:
	case GENI_SE_DMA:
		if (spi_geni_is_abort_still_pending(mas))
			return -EBUSY;
		ret = setup_fifo_params(spi_msg->spi, spi);
		if (ret)
			dev_err(mas->dev, "Couldn't select mode %d\n", ret);
		return ret;

	case GENI_GPI_DMA:
		/* nothing to do for GPI DMA */
		return 0;
	}

	dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
	return -EINVAL;
}

static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
{
	int ret;

	mas->tx = dma_request_chan(mas->dev, "tx");
	if (IS_ERR(mas->tx)) {
		ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
				    "Failed to get tx DMA ch\n");
		goto err_tx;
	}

	mas->rx = dma_request_chan(mas->dev, "rx");
	if (IS_ERR(mas->rx)) {
		ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
				    "Failed to get rx DMA ch\n");
		goto err_rx;
	}

	return 0;

err_rx:
	mas->rx = NULL;
	dma_release_channel(mas->tx);
err_tx:
	mas->tx = NULL;
	return ret;
}

static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
{
	if (mas->rx) {
		dma_release_channel(mas->rx);
		mas->rx = NULL;
	}

	if (mas->tx) {
		dma_release_channel(mas->tx);
		mas->tx = NULL;
	}
}

static int spi_geni_init(struct spi_geni_master *mas)
{
	struct spi_controller *spi = dev_get_drvdata(mas->dev);
	struct geni_se *se = &mas->se;
	unsigned int proto, major, minor, ver;
	u32 spi_tx_cfg, fifo_disable;
	int ret = -ENXIO;

	pm_runtime_get_sync(mas->dev);

	proto = geni_se_read_proto(se);

	if (spi->target) {
		if (proto != GENI_SE_SPI_SLAVE) {
			dev_err(mas->dev, "Invalid proto %d\n", proto);
			goto out_pm;
		}
		spi_slv_setup(mas);
	} else if (proto != GENI_SE_SPI) {
		dev_err(mas->dev, "Invalid proto %d\n", proto);
		goto out_pm;
	}
	mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

	/* Width of Tx and Rx FIFO is same */
	mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);

	/*
	 * Hardware programming guide suggests to configure
	 * RX FIFO RFR level to fifo_depth-2.
	 */
	geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
	/* Transmit an entire FIFO worth of data per IRQ */
	mas->tx_wm = 1;
	ver = geni_se_get_qup_hw_version(se);
	major = GENI_SE_VERSION_MAJOR(ver);
	minor = GENI_SE_VERSION_MINOR(ver);

	if (major == 1 && minor == 0)
		mas->oversampling = 2;
	else
		mas->oversampling = 1;

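	/*
	 * A set FIFO_IF_DISABLE bit means the firmware has disabled the FIFO
	 * interface for this SE, so GPI DMA is the intended transfer mode.
	 */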
	fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
	switch (fifo_disable) {
	case 1:
		ret = spi_geni_grab_gpi_chan(mas);
		if (!ret) { /* success case */
			mas->cur_xfer_mode = GENI_GPI_DMA;
			geni_se_select_mode(se, GENI_GPI_DMA);
			dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
			break;
		} else if (ret == -EPROBE_DEFER) {
			goto out_pm;
		}
		/*
		 * In case of failure to get a GPI DMA channel we can still
		 * use FIFO mode, so fall through.
		 */
		dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
		fallthrough;

	case 0:
		mas->cur_xfer_mode = GENI_SE_FIFO;
		geni_se_select_mode(se, GENI_SE_FIFO);
		ret = 0;
		break;
	}

	/* We always control CS manually */
	if (!spi->target) {
		spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
		spi_tx_cfg &= ~CS_TOGGLE;
		writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
	}

out_pm:
	pm_runtime_put(mas->dev);
	return ret;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
	/*
	 * Calculate how many bytes we'll put in each FIFO word. If the
	 * transfer words don't pack cleanly into a FIFO word we'll just put
	 * one transfer word in each FIFO word. If they do pack we'll pack 'em.
	 */
	if (mas->fifo_width_bits % mas->cur_bits_per_word)
		return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
						       BITS_PER_BYTE));

	return mas->fifo_width_bits / BITS_PER_BYTE;
}

static bool geni_spi_handle_tx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	unsigned int max_bytes;
	const u8 *tx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	/* Stop the watermark IRQ if nothing to send */
	if (!mas->cur_xfer) {
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
		return false;
	}

	max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
	if (mas->tx_rem_bytes < max_bytes)
		max_bytes = mas->tx_rem_bytes;

	tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
	while (i < max_bytes) {
		unsigned int j;
		unsigned int bytes_to_write;
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;

		bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes) {
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
		return false;
	}
	return true;
}

static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	u32 rx_fifo_status;
	unsigned int rx_bytes;
	unsigned int rx_last_byte_valid;
	u8 *rx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
	rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
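	/*
	 * The word count assumes full FIFO words; if this is the end of a
	 * transfer, trim rx_bytes down to the valid bytes in the final
	 * (possibly partial) FIFO word.
	 */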
	if (rx_fifo_status & RX_LAST) {
		rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
		rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
		if (rx_last_byte_valid && rx_last_byte_valid < 4)
			rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
	}

	/* Clear out the FIFO and bail if nowhere to put it */
	if (!mas->cur_xfer) {
		for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
			readl(se->base + SE_GENI_RX_FIFOn);
		return;
	}

	if (mas->rx_rem_bytes < rx_bytes)
		rx_bytes = mas->rx_rem_bytes;

	rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;
		unsigned int bytes_to_read;
		unsigned int j;

		bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
		ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
		for (j = 0; j < bytes_to_read; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}

static int setup_se_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas,
				u16 mode, struct spi_controller *spi)
{
	u32 m_cmd = 0;
	u32 len;
	struct geni_se *se = &mas->se;
	int ret;

	/*
	 * Ensure that our interrupt handler isn't still running from some
	 * prior command before we start messing with the hardware behind
	 * its back. We don't need to _keep_ the lock here since we're only
	 * worried about racing with our interrupt handler. The SPI core
	 * already handles making sure that we're not trying to do two
	 * transfers at once or setting a chip select and doing a transfer
	 * concurrently.
	 *
	 * NOTE: we actually _can't_ hold the lock here because possibly we
	 * might call clk_set_rate() which needs to be able to sleep.
	 */
	spin_lock_irq(&mas->lock);
	spin_unlock_irq(&mas->lock);

	if (xfer->bits_per_word != mas->cur_bits_per_word) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_bits_per_word = xfer->bits_per_word;
	}

	/* Speed and bits per word can be overridden per transfer */
	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
	if (ret)
		return ret;

	mas->tx_rem_bytes = 0;
	mas->rx_rem_bytes = 0;

	len = get_xfer_len_in_words(xfer, mas);

	mas->cur_xfer = xfer;
	if (xfer->tx_buf) {
		m_cmd |= SPI_TX_ONLY;
		mas->tx_rem_bytes = xfer->len;
		writel(len, se->base + SE_SPI_TX_TRANS_LEN);
	}

	if (xfer->rx_buf) {
		m_cmd |= SPI_RX_ONLY;
		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}

	/*
	 * Select DMA mode if an sg table is present, and only if it has a
	 * single entry. This is not a serious limitation because the xfer
	 * buffers are expected to fit into one entry almost always, and if
	 * any doesn't for any reason we fall back to FIFO mode anyway.
	 */
	if (!xfer->tx_sg.nents && !xfer->rx_sg.nents)
		mas->cur_xfer_mode = GENI_SE_FIFO;
	else if (xfer->tx_sg.nents > 1 || xfer->rx_sg.nents > 1) {
		dev_warn_once(mas->dev, "Doing FIFO, cannot handle tx_nents-%d, rx_nents-%d\n",
			      xfer->tx_sg.nents, xfer->rx_sg.nents);
		mas->cur_xfer_mode = GENI_SE_FIFO;
	} else
		mas->cur_xfer_mode = GENI_SE_DMA;
	geni_se_select_mode(se, mas->cur_xfer_mode);

	/*
	 * Lock around right before we start the transfer since our
	 * interrupt could come in at any time now.
	 */
	spin_lock_irq(&mas->lock);
	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);

	if (mas->cur_xfer_mode == GENI_SE_DMA) {
		if (m_cmd & SPI_RX_ONLY)
			geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
				sg_dma_len(xfer->rx_sg.sgl));
		if (m_cmd & SPI_TX_ONLY)
			geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
				sg_dma_len(xfer->tx_sg.sgl));
	} else if (m_cmd & SPI_TX_ONLY) {
		if (geni_spi_handle_tx(mas))
			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
	}

	spin_unlock_irq(&mas->lock);
	return ret;
}

static int spi_geni_transfer_one(struct spi_controller *spi,
				 struct spi_device *slv,
				 struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	if (spi_geni_is_abort_still_pending(mas))
		return -EBUSY;

	/* Terminate and return success for 0 byte length transfer */
	if (!xfer->len)
		return 0;

	if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
		ret = setup_se_xfer(xfer, mas, slv->mode, spi);
		/* SPI framework expects +ve ret code to wait for transfer complete */
		if (!ret)
			ret = 1;
		return ret;
	}
	return setup_gsi_xfer(xfer, mas, slv, spi);
}

static irqreturn_t geni_spi_isr(int irq, void *data)
{
	struct spi_controller *spi = data;
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u32 m_irq;

	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
	if (!m_irq)
		return IRQ_NONE;

	if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
		     M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
		     M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
		dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);

	spin_lock(&mas->lock);

	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
		if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
			geni_spi_handle_rx(mas);

		if (m_irq & M_TX_FIFO_WATERMARK_EN)
			geni_spi_handle_tx(mas);

		if (m_irq & M_CMD_DONE_EN) {
			if (mas->cur_xfer) {
				spi_finalize_current_transfer(spi);
				mas->cur_xfer = NULL;
				/*
				 * If this happens, then a CMD_DONE came before all the
				 * Tx buffer bytes were sent out. This is unusual, log
				 * this condition and disable the WM interrupt to
				 * prevent the system from stalling due to an interrupt
				 * storm.
				 *
				 * If this happens when all Rx bytes haven't been
				 * received, log the condition. The only known time
				 * this can happen is if bits_per_word != 8 and some
				 * registers that expect xfer lengths in num spi_words
				 * weren't written correctly.
				 */
				if (mas->tx_rem_bytes) {
					writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
					dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
						mas->tx_rem_bytes, mas->cur_bits_per_word);
				}
				if (mas->rx_rem_bytes)
					dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
						mas->rx_rem_bytes, mas->cur_bits_per_word);
			} else {
				complete(&mas->cs_done);
			}
		}
	} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
		const struct spi_transfer *xfer = mas->cur_xfer;
		u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
		u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);

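		/* Ack whichever DMA events fired; the saved status drives the handling below. */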
		if (dma_tx_status)
			writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
		if (dma_rx_status)
			writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
		if (dma_tx_status & TX_DMA_DONE)
			mas->tx_rem_bytes = 0;
		if (dma_rx_status & RX_DMA_DONE)
			mas->rx_rem_bytes = 0;
		if (dma_tx_status & TX_RESET_DONE)
			complete(&mas->tx_reset_done);
		if (dma_rx_status & RX_RESET_DONE)
			complete(&mas->rx_reset_done);
		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
			spi_finalize_current_transfer(spi);
			mas->cur_xfer = NULL;
		}
	}

	if (m_irq & M_CMD_CANCEL_EN)
		complete(&mas->cancel_done);
	if (m_irq & M_CMD_ABORT_EN)
		complete(&mas->abort_done);

	/*
	 * It's safe, or a good idea, to Ack all of our interrupts at the end
	 * of the function. Specifically:
	 * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
	 *   clearing Acks. Clearing at the end relies on nobody else having
	 *   started a new transfer yet or else we could be clearing _their_
	 *   done bit, but everyone grabs the spinlock before starting a new
	 *   transfer.
	 * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
	 *   to be "latched level" interrupts so it's important to clear them
	 *   _after_ you've handled the condition and always safe to do so
	 *   since they'll re-assert if they're still happening.
	 */
	writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);

	spin_unlock(&mas->lock);

	return IRQ_HANDLED;
}

static int spi_geni_probe(struct platform_device *pdev)
{
	int ret, irq;
	struct spi_controller *spi;
	struct spi_geni_master *mas;
	void __iomem *base;
	struct clk *clk;
	struct device *dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return dev_err_probe(dev, ret, "could not set DMA mask\n");

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(dev, "se");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	spi = devm_spi_alloc_host(dev, sizeof(*mas));
	if (!spi)
		return -ENOMEM;

	platform_set_drvdata(pdev, spi);
	mas = spi_controller_get_devdata(spi);
	mas->irq = irq;
	mas->dev = dev;
	mas->se.dev = dev;
	mas->se.wrapper = dev_get_drvdata(dev->parent);
	mas->se.base = base;
	mas->se.clk = clk;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	spi->bus_num = -1;
	spi->dev.of_node = dev->of_node;
	spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = 4;
	spi->max_speed_hz = 50000000;
	spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
	spi->prepare_message = spi_geni_prepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->can_dma = geni_can_dma;
	spi->dma_map_dev = dev->parent;
	spi->auto_runtime_pm = true;
	spi->handle_err = spi_geni_handle_err;
	spi->use_gpio_descriptors = true;

	init_completion(&mas->cs_done);
	init_completion(&mas->cancel_done);
	init_completion(&mas->abort_done);
	init_completion(&mas->tx_reset_done);
	init_completion(&mas->rx_reset_done);
	spin_lock_init(&mas->lock);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
	pm_runtime_enable(dev);

	if (device_property_read_bool(&pdev->dev, "spi-slave"))
		spi->target = true;

	ret = geni_icc_get(&mas->se, NULL);
	if (ret)
		goto spi_geni_probe_runtime_disable;
	/* Set the bus quota to a reasonable value for register access */
	mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
	mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

	ret = geni_icc_set_bw(&mas->se);
	if (ret)
		goto spi_geni_probe_runtime_disable;

	ret = spi_geni_init(mas);
	if (ret)
		goto spi_geni_probe_runtime_disable;

	/*
	 * Only use spi_geni_set_cs() in FIFO mode, where we control CS
	 * manually. For (GSI) DMA mode, the GSI asserts CS based on the
	 * params passed in the TRE.
	 */
	if (!spi->target && mas->cur_xfer_mode == GENI_SE_FIFO)
		spi->set_cs = spi_geni_set_cs;

	/* TX is required per GSI spec, see setup_gsi_xfer(). */
	if (mas->cur_xfer_mode == GENI_GPI_DMA)
		spi->flags = SPI_CONTROLLER_MUST_TX;

	ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
	if (ret)
		goto spi_geni_release_dma;

	ret = spi_register_controller(spi);
	if (ret)
		goto spi_geni_probe_free_irq;

	return 0;
spi_geni_probe_free_irq:
	free_irq(mas->irq, spi);
spi_geni_release_dma:
	spi_geni_release_dma_chan(mas);
spi_geni_probe_runtime_disable:
	pm_runtime_disable(dev);
	return ret;
}

static void spi_geni_remove(struct platform_device *pdev)
{
	struct spi_controller *spi = platform_get_drvdata(pdev);
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_controller(spi);

	spi_geni_release_dma_chan(mas);

	free_irq(mas->irq, spi);
	pm_runtime_disable(&pdev->dev);
}

static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);

	ret = geni_se_resources_off(&mas->se);
	if (ret)
		return ret;

	return geni_icc_disable(&mas->se);
}

static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	ret = geni_icc_enable(&mas->se);
	if (ret)
		return ret;

	ret = geni_se_resources_on(&mas->se);
	if (ret)
		return ret;

	return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}

static int __maybe_unused spi_geni_suspend(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(spi);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_controller_resume(spi);

	return ret;
}

static int __maybe_unused spi_geni_resume(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_controller_resume(spi);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
					spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,geni-spi" },
	{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);

static struct platform_driver spi_geni_driver = {
	.probe = spi_geni_probe,
	.remove_new = spi_geni_remove,
	.driver = {
		.name = "geni_spi",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");