// SPDX-License-Identifier: GPL-2.0-only
/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

#define SPFI_DEVICE_PARAMETER(x)		(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

#define SPFI_CONTROL				0x14
#define SPFI_CONTROL_CONTINUE			BIT(12)
#define SPFI_CONTROL_SOFT_RESET			BIT(11)
#define SPFI_CONTROL_SEND_DMA			BIT(10)
#define SPFI_CONTROL_GET_DMA			BIT(9)
#define SPFI_CONTROL_SE				BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT		5
#define SPFI_CONTROL_TMODE_MASK			0x7
#define SPFI_CONTROL_TMODE_SINGLE		0
#define SPFI_CONTROL_TMODE_DUAL			1
#define SPFI_CONTROL_TMODE_QUAD			2
#define SPFI_CONTROL_SPFI_EN			BIT(0)

#define SPFI_TRANSACTION			0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT		16
#define SPFI_TRANSACTION_TSIZE_MASK		0xffff

#define SPFI_PORT_STATE				0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT		20
#define SPFI_PORT_STATE_DEV_SEL_MASK		0x7
#define SPFI_PORT_STATE_CK_POL(x)		BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)		BIT(14 - (x))

#define SPFI_TX_32BIT_VALID_DATA		0x20
#define SPFI_TX_8BIT_VALID_DATA			0x24
#define SPFI_RX_32BIT_VALID_DATA		0x28
#define SPFI_RX_8BIT_VALID_DATA			0x2c

#define SPFI_INTERRUPT_STATUS			0x30
#define SPFI_INTERRUPT_ENABLE			0x34
#define SPFI_INTERRUPT_CLEAR			0x38
#define SPFI_INTERRUPT_IACCESS			BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT			BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG		BIT(9)
#define SPFI_INTERRUPT_GDFUL			BIT(8)
#define SPFI_INTERRUPT_GDHF			BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT		BIT(6)
#define SPFI_INTERRUPT_GDTRIG			BIT(5)
#define SPFI_INTERRUPT_SDFUL			BIT(3)
#define SPFI_INTERRUPT_SDHF			BIT(2)
#define SPFI_INTERRUPT_SDE			BIT(1)
#define SPFI_INTERRUPT_SDTRIG			BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each. The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE			64
#define SPFI_8BIT_FIFO_SIZE			16

struct img_spfi {
	struct device *dev;
	struct spi_controller *host;
	spinlock_t lock;

	void __iomem *regs;
	phys_addr_t phys;
	int irq;
	struct clk *spfi_clk;
	struct clk *sys_clk;

	struct dma_chan *rx_ch;
	struct dma_chan *tx_ch;
	bool tx_dma_busy;
	bool rx_dma_busy;
};

static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}

static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}

static inline void spfi_start(struct img_spfi *spfi)
{
	u32 val;

	val = spfi_readl(spfi, SPFI_CONTROL);
	val |= SPFI_CONTROL_SPFI_EN;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}

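/*
 * Wait for the controller to raise the ALLDONE trigger, clearing it on
 * success. If the transaction does not complete within 50 ms, soft-reset
 * the block and report a timeout.
 */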
static int spfi_wait_all_done(struct img_spfi *spfi)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (time_before(jiffies, timeout)) {
		u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);

		if (status & SPFI_INTERRUPT_ALLDONETRIG) {
			spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
				    SPFI_INTERRUPT_CLEAR);
			return 0;
		}
		cpu_relax();
	}

	dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
	spfi_reset(spfi);

	return -ETIMEDOUT;
}

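/*
 * PIO FIFO helpers. Each routine moves as much data as the corresponding
 * FIFO currently allows and returns the number of bytes actually
 * transferred, so the caller loops until the transfer is complete.
 */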
static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
				     unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
				   unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX8BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

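/*
 * Perform a transfer entirely under PIO: push and pull the FIFOs until all
 * bytes have been moved or a length-based timeout expires, then wait for
 * the controller to signal completion.
 */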
static int img_spfi_start_pio(struct spi_controller *host,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;
	int ret;

	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);

	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

		if (tx_bytes >= 4)
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
		else
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

		if (rx_bytes >= 4)
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
		else
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		return -ETIMEDOUT;
	}

	ret = spfi_wait_all_done(spfi);
	if (ret < 0)
		return ret;

	return 0;
}

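/*
 * DMA completion callbacks. The current transfer is finalized only once
 * both the RX and TX directions have finished.
 */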
static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->rx_dma_busy = false;
	if (!spfi->tx_dma_busy)
		spi_finalize_current_transfer(spfi->host);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->tx_dma_busy = false;
	if (!spfi->rx_dma_busy)
		spi_finalize_current_transfer(spfi->host);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

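/*
 * Set up and issue DMA for the RX and/or TX directions. Returns 1 so that
 * the SPI core waits for spi_finalize_current_transfer() from the DMA
 * completion callbacks, or a negative error code on failure.
 */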
static int img_spfi_start_dma(struct spi_controller *host,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}

	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}

static void img_spfi_handle_err(struct spi_controller *host,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_controller_get_devdata(host);
	unsigned long flags;

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_prepare(struct spi_controller *host, struct spi_message *msg)
{
	struct img_spfi *spfi = spi_controller_get_devdata(host);
	u32 val;

	val = spfi_readl(spfi, SPFI_PORT_STATE);
	val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
		 SPFI_PORT_STATE_DEV_SEL_SHIFT);
	val |= spi_get_chipselect(msg->spi, 0) << SPFI_PORT_STATE_DEV_SEL_SHIFT;
	if (msg->spi->mode & SPI_CPHA)
		val |= SPFI_PORT_STATE_CK_PHASE(spi_get_chipselect(msg->spi, 0));
	else
		val &= ~SPFI_PORT_STATE_CK_PHASE(spi_get_chipselect(msg->spi, 0));
	if (msg->spi->mode & SPI_CPOL)
		val |= SPFI_PORT_STATE_CK_POL(spi_get_chipselect(msg->spi, 0));
	else
		val &= ~SPFI_PORT_STATE_CK_POL(spi_get_chipselect(msg->spi, 0));
	spfi_writel(spfi, val, SPFI_PORT_STATE);

	return 0;
}

static int img_spfi_unprepare(struct spi_controller *host,
			      struct spi_message *msg)
{
	struct img_spfi *spfi = spi_controller_get_devdata(host);

	spfi_reset(spfi);

	return 0;
}

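/*
 * Program the per-chip-select bit clock divider, the transaction size, the
 * DMA request enables and the single/dual/quad transfer mode for this
 * transfer.
 */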
static void img_spfi_config(struct spi_controller *host, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 128
	 */
	div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 128);

	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi_get_chipselect(spi, 0)));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi_get_chipselect(spi, 0)));

	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	val |= SPFI_CONTROL_SE;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static int img_spfi_transfer_one(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
	int ret;

	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
		dev_err(spfi->dev,
			"Transfer length (%d) is greater than the max supported (%d)",
			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
		return -EINVAL;
	}

	img_spfi_config(host, spi, xfer);
	if (host->can_dma && host->can_dma(host, spi, xfer))
		ret = img_spfi_start_dma(host, spi, xfer);
	else
		ret = img_spfi_start_pio(host, spi, xfer);

	return ret;
}

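/* Use DMA only when the transfer does not fit in the 64-byte word FIFO. */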
static bool img_spfi_can_dma(struct spi_controller *host, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	if (xfer->len > SPFI_32BIT_FIFO_SIZE)
		return true;
	return false;
}

static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
	struct img_spfi *spfi = (struct img_spfi *)dev_id;
	u32 status;

	status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
	if (status & SPFI_INTERRUPT_IACCESS) {
		spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
		dev_err(spfi->dev, "Illegal access interrupt");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;
	u32 max_speed_hz;

	host = spi_alloc_host(&pdev->dev, sizeof(*spfi));
	if (!host)
		return -ENOMEM;
	platform_set_drvdata(pdev, host);

	spfi = spi_controller_get_devdata(host);
	spfi->dev = &pdev->dev;
	spfi->host = host;
	spin_lock_init(&spfi->lock);

	spfi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	host->auto_runtime_pm = true;
	host->bus_num = pdev->id;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		host->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	host->dev.of_node = pdev->dev.of_node;
	host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	host->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	host->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	/*
	 * The maximum speed supported by the SPFI is the lower of 1/4 of the
	 * SPFI clock and the "spfi-max-frequency" property defined in the
	 * device tree. If the property is not defined, assume the maximum
	 * supported speed is 1/4 of the SPFI clock.
	 */
	if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
				  &max_speed_hz)) {
		if (host->max_speed_hz > max_speed_hz)
			host->max_speed_hz = max_speed_hz;
	}

	host->transfer_one = img_spfi_transfer_one;
	host->prepare_message = img_spfi_prepare;
	host->unprepare_message = img_spfi_unprepare;
	host->handle_err = img_spfi_handle_err;
	host->use_gpio_descriptors = true;

	spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
	if (IS_ERR(spfi->tx_ch)) {
		ret = PTR_ERR(spfi->tx_ch);
		spfi->tx_ch = NULL;
		if (ret == -EPROBE_DEFER)
			goto disable_pm;
	}

	spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
	if (IS_ERR(spfi->rx_ch)) {
		ret = PTR_ERR(spfi->rx_ch);
		spfi->rx_ch = NULL;
		if (ret == -EPROBE_DEFER)
			goto disable_pm;
	}

	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		spfi->tx_ch = NULL;
		spfi->rx_ch = NULL;
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		host->dma_tx = spfi->tx_ch;
		host->dma_rx = spfi->rx_ch;
		host->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_controller(spfi->dev, host);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_controller_put(host);

	return ret;
}

static void img_spfi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_controller_get_devdata(host);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}
}

#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_controller_get_devdata(host);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}

static int img_spfi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		return ret;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret) {
		clk_disable_unprepare(spfi->sys_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);

	return spi_controller_suspend(host);
}

static int img_spfi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_controller_get_devdata(host);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};

static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);

static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove_new = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");