// SPDX-License-Identifier: GPL-2.0
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"
void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	struct at91_twi_pdata *pdata = dev->pdata;
	u32 filtr = 0;

	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
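	/* enable master mode and make sure slave mode is disabled */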
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

	/* enable digital filter */
	if (pdata->has_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT;

	/* enable advanced digital filter */
	if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT |
			 (AT91_TWI_FILTR_THRES(dev->filter_width) &
			 AT91_TWI_FILTR_THRES_MASK);

	/* enable analog filter */
	if (pdata->has_ana_filtr && dev->enable_ana_filt)
		filtr |= AT91_TWI_FILTR_PADFEN;

	if (filtr)
		at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0, filter_width = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
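	/*
	 * Split div into an 8-bit cdiv and a power-of-two prescaler ckdiv so
	 * that div is approximately cdiv << ckdiv.
	 */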
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;
		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}

	if (pdata->has_adv_dig_filtr) {
		/*
		 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
		 * peripheral clocks
		 */
		filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
				* (clk_get_rate(dev->clk) / 1000), 1000000);
		if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
			dev_warn(dev->dev,
				 "Filter threshold set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_FILTR_THRES_MAX, filter_width);
			filter_width = AT91_TWI_FILTR_THRES_MAX;
		}
	}

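	/*
	 * Use the same cdiv for the low (CLDIV) and high (CHDIV) periods to
	 * generate a symmetric clock, with ckdiv as the common prescaler.
	 */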
	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev->filter_width = filter_width;

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
		t->digital_filter_width_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_sync(dma->chan_rx);
		else
			dmaengine_terminate_sync(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, the THR/TX FIFO is likely not empty
	 * yet, so we have to wait for the TXCOMP or NACK bits to be set in
	 * the Status Register to be sure that the STOP bit has been sent and
	 * the transfer is complete. The NACK interrupt has already been
	 * enabled, we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

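		/*
		 * Split the buffer into a multiple-of-4 part and a remainder:
		 * the aligned part can be transferred with 4-byte FIFO
		 * accesses while the trailing bytes fall back to single byte
		 * accesses (see at91_twi_configure_dma()).
		 */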
		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If buf_len is already zero, whatever is left in RHR is garbage;
	 * read it anyway to discard it and clear RXRDY.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
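			/*
			 * Extend the transfer by the announced byte count;
			 * msg->len also accounts for the length byte that has
			 * just been read.
			 */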
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if the second to last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

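	/*
	 * With the alternative command mode the controller generates the STOP
	 * condition itself, so the whole buffer can go through DMA; otherwise
	 * the last two bytes are read manually (see the callback above).
	 */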
	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird: there is some magic about the RXRDY flag! When a data byte
	 * has almost been received, the reception of a new one is anticipated
	 * if there is no stop command to send. That is why the stop command
	 * is requested not on the last data byte but on the second to last
	 * one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data byte. It might
	 * happen when the i2c slave device sends data too quickly after
	 * receiving the ack from the master. The data has almost been
	 * received before having the order to send stop. In this case,
	 * sending the stop command could cause an RXRDY interrupt together
	 * with a TXCOMP one. It is better to manage the RXRDY interrupt first
	 * in order not to keep garbage data in the Receive Holding Register
	 * for the next transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY; this
		 * works with and without FIFO. With FIFO enabled we could
		 * also read RXFL and avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfers.
	 *
	 * In such a case, we should not write the next byte into the Transmit
	 * Holding Register (THR), otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply with another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfers.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply with another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets the NACK bit in the SR once again.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interrupts, especially the NACK interrupt.
	 * Hence, the NACK bit remains pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus, hence a second NACK is
	 * not generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than TXCOMP
	 * to detect transmission failure.
	 * Indeed, let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together in the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but they will remain
	 * in the Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However, for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the TXCOMP
	 * interrupt. If the TXCOMP interrupt were enabled before writing into
	 * THR, the interrupt handler would be called immediately and the i2c
	 * command would be reported as completed.
	 * Also, when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is
	 * enabled before or after the DMA has started to write into THR. So
	 * the TXCOMP interrupt is enabled later by
	 * at91_twi_write_data_dma_callback(). Immediately after, in that DMA
	 * callback, if the alternative command mode is not used, we still need
	 * to send the STOP condition manually by writing the corresponding bit
	 * into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
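		/* zero-length transfer: only the address is sent (SMBus Quick) */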
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually in order to not send the stop
		 * command too late and then to receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	/*
	 * some faulty I2C slave devices might hold SDA down;
	 * we can send a bus clear command, hoping that the pins will be
	 * released
	 */
	i2c_recover_bus(&dev->adapter);

	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;
	u8 *dma_buf = NULL;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
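		/*
		 * Build the internal (register) address from the first
		 * message, most significant byte first; IADRSZ is bumped by
		 * one per address byte (at most 3, see at91_twi_quirks).
		 */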
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

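	/* make sure the buffer handed to the DMA engine is DMA safe */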
	if (dev->use_dma) {
		dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
		if (!dma_buf) {
			ret = -ENOMEM;
			goto out;
		}
		dev->buf = dma_buf;
	}

	ret = at91_do_twi_transfer(dev);
	i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_chan(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_chan(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (!rinfo->pinctrl) {
		dev_info(dev->dev, "pinctrl unavailable, bus recovery not supported\n");
		return 0;
	}
	if (IS_ERR(rinfo->pinctrl)) {
		dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
		return PTR_ERR(rinfo->pinctrl);
	}
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);

	dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
	if (!(dev->transfer_status & AT91_TWI_SDA)) {
		dev_dbg(dev->dev, "SDA is down; sending bus clear command\n");
		if (dev->use_alt_cmd) {
			unsigned int acr;

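			/*
			 * in alternative command mode, clear the data length
			 * before issuing the bus clear command
			 */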
			acr = at91_twi_read(dev, AT91_TWI_ACR);
			acr &= ~AT91_TWI_ACR_DATAL_MASK;
			at91_twi_write(dev, AT91_TWI_ACR, acr);
		}
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR);
	}

	return 0;
}

static int at91_init_twi_recovery_info(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	bool has_clear_cmd = dev->pdata->has_clear_cmd;

	if (!has_clear_cmd)
		return at91_init_twi_recovery_gpio(pdev, dev);

	rinfo->recover_bus = at91_twi_recover_bus_cmd;
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

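	/* DMA is only set up when the controller is described in the device tree */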
	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-digital-filter");

	dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-analog-filter");
	at91_calc_twi_clock(dev);

	rc = at91_init_twi_recovery_info(pdev, dev);
	if (rc == -EPROBE_DEFER)
		return rc;

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;

	return 0;
}