// SPDX-License-Identifier: GPL-2.0-only
/*
 * PIC32 Quad SPI controller driver.
 *
 * Purna Chandra Mandal <purna.mandal@microchip.com>
 * Copyright (c) 2016, Microchip Technology Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/* SQI registers */
#define PESQI_XIP_CONF1_REG	0x00
#define PESQI_XIP_CONF2_REG	0x04
#define PESQI_CONF_REG		0x08
#define PESQI_CTRL_REG		0x0C
#define PESQI_CLK_CTRL_REG	0x10
#define PESQI_CMD_THRES_REG	0x14
#define PESQI_INT_THRES_REG	0x18
#define PESQI_INT_ENABLE_REG	0x1C
#define PESQI_INT_STAT_REG	0x20
#define PESQI_TX_DATA_REG	0x24
#define PESQI_RX_DATA_REG	0x28
#define PESQI_STAT1_REG		0x2C
#define PESQI_STAT2_REG		0x30
#define PESQI_BD_CTRL_REG	0x34
#define PESQI_BD_CUR_ADDR_REG	0x38
#define PESQI_BD_BASE_ADDR_REG	0x40
#define PESQI_BD_STAT_REG	0x44
#define PESQI_BD_POLL_CTRL_REG	0x48
#define PESQI_BD_TX_DMA_STAT_REG	0x4C
#define PESQI_BD_RX_DMA_STAT_REG	0x50
#define PESQI_THRES_REG		0x54
#define PESQI_INT_SIGEN_REG	0x58

/* PESQI_CONF_REG fields */
#define PESQI_MODE		0x7
#define  PESQI_MODE_BOOT	0
#define  PESQI_MODE_PIO		1
#define  PESQI_MODE_DMA		2
#define  PESQI_MODE_XIP		3
#define  PESQI_MODE_SHIFT	0
#define PESQI_CPHA		BIT(3)
#define PESQI_CPOL		BIT(4)
#define PESQI_LSBF		BIT(5)
#define PESQI_RXLATCH		BIT(7)
#define PESQI_SERMODE		BIT(8)
#define PESQI_WP_EN		BIT(9)
#define PESQI_HOLD_EN		BIT(10)
#define PESQI_BURST_EN		BIT(12)
#define PESQI_CS_CTRL_HW	BIT(15)
#define PESQI_SOFT_RESET	BIT(16)
#define PESQI_LANES_SHIFT	20
#define  PESQI_SINGLE_LANE	0
#define  PESQI_DUAL_LANE	1
#define  PESQI_QUAD_LANE	2
#define PESQI_CSEN_SHIFT	24
#define PESQI_EN		BIT(23)

/* PESQI_CLK_CTRL_REG fields */
#define PESQI_CLK_EN		BIT(0)
#define PESQI_CLK_STABLE	BIT(1)
#define PESQI_CLKDIV_SHIFT	8
#define PESQI_CLKDIV		0xff

/* PESQI_CMD_THRES_REG/INT_THRES_REG fields */
#define PESQI_TXTHR_MASK	0x1f
#define PESQI_TXTHR_SHIFT	8
#define PESQI_RXTHR_MASK	0x1f
#define PESQI_RXTHR_SHIFT	0

/* PESQI_INT_ENABLE/INT_STAT/INT_SIGEN_REG fields */
#define PESQI_TXEMPTY		BIT(0)
#define PESQI_TXFULL		BIT(1)
#define PESQI_TXTHR		BIT(2)
#define PESQI_RXEMPTY		BIT(3)
#define PESQI_RXFULL		BIT(4)
#define PESQI_RXTHR		BIT(5)
#define PESQI_BDDONE		BIT(9)  /* BD processing complete */
#define PESQI_PKTCOMP		BIT(10) /* packet processing complete */
#define PESQI_DMAERR		BIT(11) /* error */

/* PESQI_BD_CTRL_REG */
#define PESQI_DMA_EN		BIT(0) /* enable DMA engine */
#define PESQI_POLL_EN		BIT(1) /* enable polling */
#define PESQI_BDP_START		BIT(2) /* start BD processor */

/* PESQI controller buffer descriptor */
struct buf_desc {
	u32 bd_ctrl;	/* control */
	u32 bd_status;	/* reserved */
	u32 bd_addr;	/* DMA buffer addr */
	u32 bd_nextp;	/* next item in chain */
};

/* bd_ctrl */
#define BD_BUFLEN		0x1ff
#define BD_CBD_INT_EN		BIT(16) /* Current BD is processed */
#define BD_PKT_INT_EN		BIT(17) /* All BDs of PKT processed */
#define BD_LIFM			BIT(18) /* last data of pkt */
#define BD_LAST			BIT(19) /* end of list */
#define BD_DATA_RECV		BIT(20) /* receive data */
#define BD_DDR			BIT(21) /* DDR mode */
#define BD_DUAL			BIT(22) /* Dual SPI */
#define BD_QUAD			BIT(23) /* Quad SPI */
#define BD_LSBF			BIT(25) /* LSB First */
#define BD_STAT_CHECK		BIT(27) /* Status poll */
#define BD_DEVSEL_SHIFT		28	/* CS */
#define BD_CS_DEASSERT		BIT(30) /* de-assert CS after current BD */
#define BD_EN			BIT(31) /* BD owned by H/W */

/**
 * struct ring_desc - Representation of SQI ring descriptor
 * @list: list element to add to free or used list.
 * @bd: PESQI controller buffer descriptor
 * @bd_dma: DMA address of PESQI controller buffer descriptor
 * @xfer_len: transfer length
 */
struct ring_desc {
	struct list_head list;
	struct buf_desc *bd;
	dma_addr_t bd_dma;
	u32 xfer_len;
};

/* Global constants */
#define PESQI_BD_BUF_LEN_MAX	256
#define PESQI_BD_COUNT		256 /* max 64KB data per spi message */
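
/*
 * Note: the BD length field (BD_BUFLEN) is 9 bits wide, so a single BD
 * could cover up to 511 bytes; this driver caps each BD at
 * PESQI_BD_BUF_LEN_MAX bytes, and 256 BDs * 256 bytes is where the 64KB
 * per-message limit above comes from.
 */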

struct pic32_sqi {
	void __iomem		*regs;
	struct clk		*sys_clk;
	struct clk		*base_clk; /* drives spi clock */
	struct spi_controller	*host;
	int			irq;
	struct completion	xfer_done;
	struct ring_desc	*ring;
	void			*bd;
	dma_addr_t		bd_dma;
	struct list_head	bd_list_free; /* free */
	struct list_head	bd_list_used; /* allocated */
	struct spi_device	*cur_spi;
	u32			cur_speed;
	u8			cur_mode;
};

static inline void pic32_setbits(void __iomem *reg, u32 set)
{
	writel(readl(reg) | set, reg);
}

static inline void pic32_clrbits(void __iomem *reg, u32 clr)
{
	writel(readl(reg) & ~clr, reg);
}

static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
{
	u32 val, div;

	/* div = base_clk / (2 * spi_clk) */
	div = clk_get_rate(sqi->base_clk) / (2 * sck);
	div &= PESQI_CLKDIV;
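	/*
	 * Worked example (illustrative numbers only): with base_clk at
	 * 200MHz and sck requested at 50MHz, div = 200 / (2 * 50) = 2,
	 * giving an effective SPI clock of base_clk / (2 * div) = 50MHz.
	 */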

	val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
	/* apply new divider */
	val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
	val |= div << PESQI_CLKDIV_SHIFT;
	writel(val, sqi->regs + PESQI_CLK_CTRL_REG);

	/* wait for stability */
	return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
				  val & PESQI_CLK_STABLE, 1, 5000);
}

static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
{
	u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;

	writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
	/* INT_SIGEN works as interrupt-gate to INTR line */
	writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
}

static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
{
	writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
	writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
}

static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
	struct pic32_sqi *sqi = dev_id;
	u32 enable, status;

	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
	status = readl(sqi->regs + PESQI_INT_STAT_REG);

	/* check spurious interrupt */
	if (!status)
		return IRQ_NONE;

	if (status & PESQI_DMAERR) {
		enable = 0;
		goto irq_done;
	}

	if (status & PESQI_TXTHR)
		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

	if (status & PESQI_RXTHR)
		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

	if (status & PESQI_BDDONE)
		enable &= ~PESQI_BDDONE;

	/* packet processing completed */
	if (status & PESQI_PKTCOMP) {
		/* mask all interrupts */
		enable = 0;
		/* complete the transaction */
		complete(&sqi->xfer_done);
	}

irq_done:
	/* interrupts are sticky, so mask when handled */
	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

	return IRQ_HANDLED;
}

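/*
 * Ring descriptors are recycled between a free and a used list. No
 * locking is taken here: the SPI core hands this driver one message at
 * a time, so the lists are never manipulated concurrently.
 */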
static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;

	if (list_empty(&sqi->bd_list_free))
		return NULL;

	rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
	list_move_tail(&rdesc->list, &sqi->bd_list_used);
	return rdesc;
}

static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
	list_move(&rdesc->list, &sqi->bd_list_free);
}

static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
				  struct spi_message *mesg,
				  struct spi_transfer *xfer)
{
	struct spi_device *spi = mesg->spi;
	struct scatterlist *sg, *sgl;
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int nents, i;
	u32 bd_ctrl;
	u32 nbits;

	/* Device selection */
	bd_ctrl = spi_get_chipselect(spi, 0) << BD_DEVSEL_SHIFT;

	/* half-duplex: select transfer buffer, direction and lane */
	if (xfer->rx_buf) {
		bd_ctrl |= BD_DATA_RECV;
		nbits = xfer->rx_nbits;
		sgl = xfer->rx_sg.sgl;
		nents = xfer->rx_sg.nents;
	} else {
		nbits = xfer->tx_nbits;
		sgl = xfer->tx_sg.sgl;
		nents = xfer->tx_sg.nents;
	}

	if (nbits & SPI_NBITS_QUAD)
		bd_ctrl |= BD_QUAD;
	else if (nbits & SPI_NBITS_DUAL)
		bd_ctrl |= BD_DUAL;

	/* LSB first */
	if (spi->mode & SPI_LSB_FIRST)
		bd_ctrl |= BD_LSBF;

	/* ownership to hardware */
	bd_ctrl |= BD_EN;

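	/*
	 * One BD per scatterlist entry. The core maps buffers honoring
	 * host->max_dma_len (PESQI_BD_BUF_LEN_MAX), so every entry should
	 * fit the BD length field. If the free ring runs dry, remaining
	 * entries are silently left unqueued.
	 */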
	for_each_sg(sgl, sg, nents, i) {
		/* get ring descriptor */
		rdesc = ring_desc_get(sqi);
		if (!rdesc)
			break;

		bd = rdesc->bd;

		/* BD CTRL: length */
		rdesc->xfer_len = sg_dma_len(sg);
		bd->bd_ctrl = bd_ctrl;
		bd->bd_ctrl |= rdesc->xfer_len;

		/* BD STAT */
		bd->bd_status = 0;

		/* BD BUFFER ADDRESS */
		bd->bd_addr = sg->dma_address;
	}

	return 0;
}

static int pic32_sqi_prepare_hardware(struct spi_controller *host)
{
	struct pic32_sqi *sqi = spi_controller_get_devdata(host);

	/* enable spi interface */
	pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
	/* enable spi clk */
	pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);

	return 0;
}

static bool pic32_sqi_can_dma(struct spi_controller *host,
			      struct spi_device *spi,
			      struct spi_transfer *x)
{
	/* Do DMA irrespective of transfer size */
	return true;
}

static int pic32_sqi_one_message(struct spi_controller *host,
				 struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct ring_desc *rdesc, *next;
	struct spi_transfer *xfer;
	struct pic32_sqi *sqi;
	int ret = 0, mode;
	unsigned long timeout;
	u32 val;

	sqi = spi_controller_get_devdata(host);

	reinit_completion(&sqi->xfer_done);
	msg->actual_length = 0;

	/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
	 * and "delay_usecs". But spi_device specific speed and mode changes
	 * are best handled while switching the spi chip-select.
	 */
	if (sqi->cur_spi != spi) {
		/* set spi speed */
		if (sqi->cur_speed != spi->max_speed_hz) {
			sqi->cur_speed = spi->max_speed_hz;
			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
			if (ret)
				dev_warn(&spi->dev, "set_clk, %d\n", ret);
		}

		/* set spi mode */
		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
		if (sqi->cur_mode != mode) {
			val = readl(sqi->regs + PESQI_CONF_REG);
			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
			if (mode & SPI_CPOL)
				val |= PESQI_CPOL;
			if (mode & SPI_LSB_FIRST)
				val |= PESQI_LSBF;
			val |= PESQI_CPHA;
			writel(val, sqi->regs + PESQI_CONF_REG);

			sqi->cur_mode = mode;
		}
		sqi->cur_spi = spi;
	}

	/* prepare hardware desc-list(BD) for transfer(s) */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
		if (ret) {
			dev_err(&spi->dev, "xfer %p err\n", xfer);
			goto xfer_out;
		}
	}

	/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
	 * element of the list.
	 */
	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
			      BD_LIFM | BD_PKT_INT_EN;
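	/* BD_LIFM marks the final data of the packet; together with
	 * BD_PKT_INT_EN it makes the controller raise PESQI_PKTCOMP on
	 * completion, which is what wakes the waiter in pic32_sqi_isr().
	 */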

	/* set base address of BD list for DMA engine */
	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

	/* enable interrupt */
	pic32_sqi_enable_int(sqi);

	/* enable DMA engine */
	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
	writel(val, sqi->regs + PESQI_BD_CTRL_REG);

	/* wait for xfer completion */
	timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
	if (timeout == 0) {
		dev_err(&sqi->host->dev, "wait timed out/interrupted\n");
		ret = -ETIMEDOUT;
		msg->status = ret;
	} else {
		/* success */
		msg->status = 0;
		ret = 0;
	}

	/* disable DMA */
	writel(0, sqi->regs + PESQI_BD_CTRL_REG);

	pic32_sqi_disable_int(sqi);

xfer_out:
	list_for_each_entry_safe_reverse(rdesc, next,
					 &sqi->bd_list_used, list) {
		/* update total bytes transferred */
		msg->actual_length += rdesc->xfer_len;
		/* release ring descriptor */
		ring_desc_put(sqi, rdesc);
	}
	spi_finalize_current_message(spi->controller);

	return ret;
}

static int pic32_sqi_unprepare_hardware(struct spi_controller *host)
{
	struct pic32_sqi *sqi = spi_controller_get_devdata(host);

	/* disable clk */
	pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
	/* disable spi */
	pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);

	return 0;
}

static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int i;

	/* allocate coherent DMAable memory for hardware buffer descriptors. */
	sqi->bd = dma_alloc_coherent(&sqi->host->dev,
				     sizeof(*bd) * PESQI_BD_COUNT,
				     &sqi->bd_dma, GFP_KERNEL);
	if (!sqi->bd) {
		dev_err(&sqi->host->dev, "failed allocating dma buffer\n");
		return -ENOMEM;
	}

	/* allocate software ring descriptors */
	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
	if (!sqi->ring) {
		dma_free_coherent(&sqi->host->dev,
				  sizeof(*bd) * PESQI_BD_COUNT,
				  sqi->bd, sqi->bd_dma);
		return -ENOMEM;
	}

	bd = (struct buf_desc *)sqi->bd;

	INIT_LIST_HEAD(&sqi->bd_list_free);
	INIT_LIST_HEAD(&sqi->bd_list_used);

	/* initialize ring-desc */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
		INIT_LIST_HEAD(&rdesc->list);
		rdesc->bd = &bd[i];
		rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
		list_add_tail(&rdesc->list, &sqi->bd_list_free);
	}

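	/*
	 * The chain built below is linear: bd[0] -> bd[1] -> ... ->
	 * bd[PESQI_BD_COUNT - 1] -> 0. The BD processor starts at
	 * PESQI_BD_BASE_ADDR_REG and walks the chain until it meets a
	 * descriptor with BD_LAST set, so unused tail BDs stay untouched.
	 */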
	/* Prepare BD: chain to next BD(s) */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
		bd[i].bd_nextp = rdesc[i + 1].bd_dma;
	bd[PESQI_BD_COUNT - 1].bd_nextp = 0;

	return 0;
}

static void ring_desc_ring_free(struct pic32_sqi *sqi)
{
	dma_free_coherent(&sqi->host->dev,
			  sizeof(struct buf_desc) * PESQI_BD_COUNT,
			  sqi->bd, sqi->bd_dma);
	kfree(sqi->ring);
}

static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
	unsigned long flags;
	u32 val;

	/* Soft-reset of the PESQI controller triggers interrupts.
	 * We are not yet ready to handle them, so disable CPU
	 * interrupts for the time being.
	 */
	local_irq_save(flags);

	/* assert soft-reset */
	writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

	/* wait until clear */
	readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
				  !(val & PESQI_SOFT_RESET), 1, 5000);
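	/* (the poll result is ignored here; the reset bit is expected to
	 * self-clear well within the 5ms budget)
	 */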

	/* disable all interrupts */
	pic32_sqi_disable_int(sqi);

	/* now it is safe to enable CPU interrupts again */
	local_irq_restore(flags);

	/* tx and rx fifo interrupt threshold */
	val = readl(sqi->regs + PESQI_CMD_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_CMD_THRES_REG);

	val = readl(sqi->regs + PESQI_INT_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_INT_THRES_REG);

	/* default configuration */
	val = readl(sqi->regs + PESQI_CONF_REG);

	/* set mode: DMA */
	val &= ~PESQI_MODE;
	val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* DATAEN - SQID0-SQID3 */
	val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;

	/* burst/INCR4 enable */
	val |= PESQI_BURST_EN;

	/* CSEN - all CS */
	val |= 3U << PESQI_CSEN_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* write poll count */
	writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

	sqi->cur_speed = 0;
	sqi->cur_mode = -1;
}

static int pic32_sqi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct pic32_sqi *sqi;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*sqi));
	if (!host)
		return -ENOMEM;

	sqi = spi_controller_get_devdata(host);
	sqi->host = host;

	sqi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sqi->regs)) {
		ret = PTR_ERR(sqi->regs);
		goto err_free_host;
	}

	/* irq */
	sqi->irq = platform_get_irq(pdev, 0);
	if (sqi->irq < 0) {
		ret = sqi->irq;
		goto err_free_host;
	}

	/* clocks */
	sqi->sys_clk = devm_clk_get_enabled(&pdev->dev, "reg_ck");
	if (IS_ERR(sqi->sys_clk)) {
		ret = PTR_ERR(sqi->sys_clk);
		dev_err(&pdev->dev, "no sys_clk ?\n");
		goto err_free_host;
	}

	sqi->base_clk = devm_clk_get_enabled(&pdev->dev, "spi_ck");
	if (IS_ERR(sqi->base_clk)) {
		ret = PTR_ERR(sqi->base_clk);
		dev_err(&pdev->dev, "no base clk ?\n");
		goto err_free_host;
	}

	init_completion(&sqi->xfer_done);

	/* initialize hardware */
	pic32_sqi_hw_init(sqi);

	/* allocate buffers & descriptors */
	ret = ring_desc_ring_alloc(sqi);
	if (ret) {
		dev_err(&pdev->dev, "ring alloc failed\n");
		goto err_free_host;
	}

	/* install irq handlers */
	ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
			  dev_name(&pdev->dev), sqi);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
		goto err_free_ring;
	}

	/* register host */
	host->num_chipselect = 2;
	host->max_speed_hz = clk_get_rate(sqi->base_clk);
	host->dma_alignment = 32;
	host->max_dma_len = PESQI_BD_BUF_LEN_MAX;
	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
			  SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->can_dma = pic32_sqi_can_dma;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	host->transfer_one_message = pic32_sqi_one_message;
	host->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
	host->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret) {
		dev_err(&host->dev, "failed registering spi host\n");
		free_irq(sqi->irq, sqi);
		goto err_free_ring;
	}

	platform_set_drvdata(pdev, sqi);

	return 0;

err_free_ring:
	ring_desc_ring_free(sqi);

err_free_host:
	spi_controller_put(host);
	return ret;
}

static void pic32_sqi_remove(struct platform_device *pdev)
{
	struct pic32_sqi *sqi = platform_get_drvdata(pdev);

	/* release resources */
	free_irq(sqi->irq, sqi);
	ring_desc_ring_free(sqi);
}

static const struct of_device_id pic32_sqi_of_ids[] = {
	{ .compatible = "microchip,pic32mzda-sqi" },
	{},
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
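
/*
 * Illustrative device-tree node (values are placeholders, not taken from
 * a real board file; only the compatible string and the clock names are
 * dictated by this driver):
 *
 *	sqi@... {
 *		compatible = "microchip,pic32mzda-sqi";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <&clks ...>, <&clks ...>;
 *		clock-names = "spi_ck", "reg_ck";
 *	};
 */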

static struct platform_driver pic32_sqi_driver = {
	.driver = {
		.name = "sqi-pic32",
		.of_match_table = of_match_ptr(pic32_sqi_of_ids),
	},
	.probe = pic32_sqi_probe,
	.remove_new = pic32_sqi_remove,
};

module_platform_driver(pic32_sqi_driver);

MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
MODULE_LICENSE("GPL v2");