// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE | UART011_DR_BE | UART011_DR_PE | UART011_DR_FE)
#define UART_DUMMY_DR_RX	BIT(16)

enum {
	REG_DR,
	REG_ST_DMAWM,
	REG_ST_TIMEOUT,
	REG_FR,
	REG_LCRH_RX,
	REG_LCRH_TX,
	REG_IBRD,
	REG_FBRD,
	REG_CR,
	REG_IFLS,
	REG_IMSC,
	REG_RIS,
	REG_MIS,
	REG_ICR,
	REG_DMACR,
	REG_ST_XFCR,
	REG_ST_XON1,
	REG_ST_XON2,
	REG_ST_XOFF1,
	REG_ST_XOFF2,
	REG_ST_ITCR,
	REG_ST_ITIP,
	REG_ST_ABCR,
	REG_ST_ABIMSC,

	/* The size of the array - must be last */
	REG_ARRAY_SIZE,
};

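/*
 * Register offsets are resolved through a per-vendor lookup table rather
 * than being used directly, so variants that relocated or added registers
 * (the ST parts below) can share the rest of the driver.  pl011_read()
 * and pl011_write() translate a REG_* index through uap->reg_offset.
 */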
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};

static const struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};

#ifdef CONFIG_ACPI_SPCR_TABLE
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#endif

static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF | UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};

/* Deals with DMA transactions */

struct pl011_dmabuf {
	dma_addr_t	dma;
	size_t		len;
	char		*buf;
};

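/*
 * RX DMA uses two buffers in a ping-pong fashion: while DMA fills one of
 * dbuf_a/dbuf_b (selected by use_buf_b), the other is drained into the
 * TTY layer, and the buffers are swapped on each completion or timeout.
 */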
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_dmabuf	dbuf_a;
	struct pl011_dmabuf	dbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;
	unsigned long		last_jiffies;
	bool			auto_poll_rate;
	unsigned int		poll_rate;
	unsigned int		poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	dma_addr_t		dma;
	size_t			len;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
	bool			rs485_tx_started;
	unsigned int		rs485_tx_drain_interval; /* usecs */
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data	dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_tx_empty(struct uart_port *port);

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
					unsigned int reg)
{
	return uap->reg_offset[reg];
}

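/*
 * Vendors with access_32b set (e.g. the SBSA UART above) require 32-bit
 * register accesses; the port's iotype is expected to be UPIO_MEM32 for
 * such parts, so the accessors below pick the readl/writel or
 * readw/writew variants at run time.
 */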
static unsigned int pl011_read(const struct uart_amba_port *uap,
			       unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
			unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, fifotaken;
	int sysrq;
	u16 status;
	u8 flag;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE) {
				uap->port.icount.parity++;
			} else if (ch & UART011_DR_FE) {
				uap->port.icount.frame++;
			}
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255);
		if (!sysrq)
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
			     enum dma_data_direction dir)
{
	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
				     &db->dma, GFP_KERNEL);
	if (!db->buf)
		return -ENOMEM;
	db->len = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
			      enum dma_data_direction dir)
{
	if (db->buf) {
		dma_free_coherent(chan->device->dev,
				  PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
	}
}

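/*
 * pl011_dma_probe() tries the named "tx"/"rx" dmaengine channels first
 * and falls back to the legacy platform-data filter function.  When
 * probing from the device tree, RX DMA polling can be tuned with the
 * "auto-poll", "poll-rate-ms" and "poll-timeout-ms" properties parsed
 * below, e.g. (illustrative node name and values only):
 *
 *	uart0: serial@1000 {
 *		auto-poll;
 *		poll-rate-ms = <100>;
 *		poll-timeout-ms = <3000>;
 *	};
 */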
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_dbg(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan) && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (!IS_ERR(chan)) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing,
		 * otherwise assume all is well.
		 */
		if (dma_get_slave_caps(chan, &caps) == 0) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					 "RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default to a 100 ms poll rate if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* Default poll_timeout to 3 secs if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate =
					of_property_read_bool(dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0)
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0)
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	uart_port_lock_irqsave(&uap->port, &flags);
	if (uap->dmatx.queued)
		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
				 dmatx->len, DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		uart_port_unlock_irqrestore(&uap->port, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	uart_port_unlock_irqrestore(&uap->port, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *	1 if we queued up a TX DMA buffer.
 *	0 if we didn't want to handle this by DMA
 *	<0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head) {
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->len = count;
	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
				    DMA_TO_DEVICE);
	if (dmatx->dma == DMA_MAPPING_ERROR) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	uart_xmit_advance(&uap->port, count);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *	false if we want to use PIO to transmit
 *	true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *	false if we want the TX IRQ to be enabled
 *	true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else {
				ret = false;
			}
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
				 uap->dmatx.len, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

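/*
 * Queue a single DMA transfer into the currently selected RX buffer,
 * enable RX DMA in DMACR and mask the RX interrupt - while the DMA job
 * runs, incoming characters go to memory instead of raising RXIS.
 */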
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_dmabuf *dbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	dbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmabuf *dbuf = use_buf_b ?
		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = dbuf->len - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {
		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_buf() tries to take as many chars
		 * as it can.
		 */
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = dbuf->len;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
}

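/*
 * Receive-side IRQ while a DMA job is running: pause the channel so the
 * residue can be trusted, stop RX DMA, push whatever DMA and the FIFO
 * already hold into the TTY layer, then swap buffers and restart DMA.
 */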
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = dbuf->len - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
		&dmarx->dbuf_b : &dmarx->dbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	uart_port_lock_irq(&uap->port);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = dbuf->len - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	uart_unlock_and_check_sysrq(&uap->port);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	if (!uap->using_rx_dma)
		return;

	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY. last_residue is also updated for the next poll.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_dmabuf *dbuf;
	int dma_count;
	struct dma_tx_state state;

	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = dbuf->len - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
						   size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {
		uart_port_lock_irqsave(&uap->port, &flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		uart_port_unlock_irqrestore(&uap->port, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}

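/*
 * Set up DMA for a port being opened: (re)probe channels if needed,
 * allocate the TX bounce buffer and both RX buffers, enable DMA-on-error
 * and kick off the first RX job. TX falls back to PIO and RX to
 * interrupt mode whenever any of these steps fails.
 */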
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		uap->port.fifosize = uap->fifosize;
		return;
	}

	uap->dmatx.len = PL011_DMA_BUFFER_SIZE;

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
				DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
				DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
				  DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev,
				"could not trigger initial RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}

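/*
 * Tear down DMA on port shutdown: wait for the transmitter to drain,
 * clear DMACR under the port lock, then terminate any in-flight jobs
 * and free the TX bounce buffer and both RX DMA buffers.
 */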
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	uart_port_lock_irq(&uap->port);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uart_port_unlock_irq(&uap->port);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_single(uap->dmatx.chan->device->dev,
					 uap->dmatx.dma, uap->dmatx.len,
					 DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
	/*
	 * To be on the safe side only time out after twice as many iterations
	 * as fifo size.
	 */
	const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
	struct uart_port *port = &uap->port;
	int i = 0;
	u32 cr;

	/* Wait until hardware tx queue is empty */
	while (!pl011_tx_empty(port)) {
		if (i > MAX_TX_DRAIN_ITERS) {
			dev_warn(port->dev,
				 "timeout while draining hardware tx queue\n");
			break;
		}

		udelay(uap->rs485_tx_drain_interval);
		i++;
	}

	if (port->rs485.delay_rts_after_send)
		mdelay(port->rs485.delay_rts_after_send);

	cr = pl011_read(uap, REG_CR);

	if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	/* Disable the transmitter and reenable the transceiver */
	cr &= ~UART011_CR_TXE;
	cr |= UART011_CR_RXE;
	pl011_write(cr, uap, REG_CR);

	uap->rs485_tx_started = false;
}

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);

	if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
		pl011_rs485_tx_stop(uap);
}

static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_rs485_tx_start(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	u32 cr;

	/* Enable transmitter */
	cr = pl011_read(uap, REG_CR);
	cr |= UART011_CR_TXE;

	/* Disable receiver if half-duplex */
	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
		cr &= ~UART011_CR_RXE;

	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	pl011_write(cr, uap, REG_CR);

	if (port->rs485.delay_rts_before_send)
		mdelay(port->rs485.delay_rts_before_send);

	uap->rs485_tx_started = true;
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
	    !uap->rs485_tx_started)
		pl011_rs485_tx_start(uap);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM |
		     UART011_PEIM | UART011_BEIM | UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_throttle_rx(struct uart_port *port)
{
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	pl011_stop_rx(port);
	uart_port_unlock_irqrestore(port, flags);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	uart_port_unlock(&uap->port);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	uart_port_lock(&uap->port);
}

static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}

/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}

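/*
 * Fold modem-status changes from the flag register into the event
 * counters and wake anyone sleeping on delta_msr_wait. DCD and CTS
 * changes are additionally routed to the serial core helpers.
 */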
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26ns(1 uart clk) delay before W1C;
	 * single apb access will incur 2 pclk(133.12Mhz) delay,
	 * so add 2 dummy reads
	 */
	pl011_read(uap, REG_ICR);
	pl011_read(uap, REG_ICR);
}

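/*
 * Top-level interrupt handler. Raw status is re-read and handled in a
 * loop, bounded by AMBA_ISR_PASS_LIMIT so a steady stream of events
 * cannot keep us here forever with the port lock held.
 */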
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	uart_port_lock(&uap->port);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS | UART011_RTIS | UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS | UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS | UART011_DCDMIS |
				      UART011_CTSMIS | UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	uart_unlock_and_check_sysrq(&uap->port);

	return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}

static void pl011_maybe_set_bit(bool cond, unsigned int *ptr, unsigned int mask)
{
	if (cond)
		*ptr |= mask;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

	pl011_maybe_set_bit(status & UART01x_FR_DCD, &result, TIOCM_CAR);
	pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR);
	pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS);
	pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG);

	return result;
}

static void pl011_assign_bit(bool cond, unsigned int *ptr, unsigned int mask)
{
	if (cond)
		*ptr |= mask;
	else
		*ptr &= ~mask;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

	pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTS);
	pl011_assign_bit(mctrl & TIOCM_DTR, &cr, UART011_CR_DTR);
	pl011_assign_bit(mctrl & TIOCM_OUT1, &cr, UART011_CR_OUT1);
	pl011_assign_bit(mctrl & TIOCM_OUT2, &cr, UART011_CR_OUT2);
	pl011_assign_bit(mctrl & TIOCM_LOOP, &cr, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTSEN);
	}

	pl011_write(cr, uap, REG_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	uart_port_lock_irqsave(&uap->port, &flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	uart_port_unlock_irqrestore(&uap->port, flags);
}
1656 | |
1657 | #ifdef CONFIG_CONSOLE_POLL |
1658 | |
1659 | static void pl011_quiesce_irqs(struct uart_port *port) |
1660 | { |
1661 | struct uart_amba_port *uap = |
1662 | container_of(port, struct uart_amba_port, port); |
1663 | |
1664 | pl011_write(val: pl011_read(uap, reg: REG_MIS), uap, reg: REG_ICR); |
1665 | /* |
1666 | * There is no way to clear TXIM as this is "ready to transmit IRQ", so |
1667 | * we simply mask it. start_tx() will unmask it. |
1668 | * |
1669 | * Note we can race with start_tx(), and if the race happens, the |
1670 | * polling user might get another interrupt just after we clear it. |
1671 | * But it should be OK and can happen even w/o the race, e.g. |
1672 | * controller immediately got some new data and raised the IRQ. |
1673 | * |
1674 | * And whoever uses polling routines assumes that it manages the device |
1675 | * (including tx queue), so we're also fine with start_tx()'s caller |
1676 | * side. |
1677 | */ |
1678 | pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap, |
1679 | REG_IMSC); |
1680 | } |
1681 | |
1682 | static int pl011_get_poll_char(struct uart_port *port) |
1683 | { |
1684 | struct uart_amba_port *uap = |
1685 | container_of(port, struct uart_amba_port, port); |
1686 | unsigned int status; |
1687 | |
1688 | /* |
1689 | * The caller might need IRQs lowered, e.g. if used with KDB NMI |
1690 | * debugger. |
1691 | */ |
1692 | pl011_quiesce_irqs(port); |
1693 | |
1694 | status = pl011_read(uap, REG_FR); |
1695 | if (status & UART01x_FR_RXFE) |
1696 | return NO_POLL_CHAR; |
1697 | |
1698 | return pl011_read(uap, REG_DR); |
1699 | } |
1700 | |
1701 | static void pl011_put_poll_char(struct uart_port *port, unsigned char ch) |
1702 | { |
1703 | struct uart_amba_port *uap = |
1704 | container_of(port, struct uart_amba_port, port); |
1705 | |
1706 | while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) |
1707 | cpu_relax(); |
1708 | |
1709 | pl011_write(ch, uap, REG_DR); |
1710 | } |
1711 | |
1712 | #endif /* CONFIG_CONSOLE_POLL */ |
1713 | |
1714 | static int pl011_hwinit(struct uart_port *port) |
1715 | { |
1716 | struct uart_amba_port *uap = |
1717 | container_of(port, struct uart_amba_port, port); |
1718 | int retval; |
1719 | |
1720 | /* Optionally enable pins to be muxed in and configured */ |
1721 | pinctrl_pm_select_default_state(port->dev); |
1722 | |
1723 | /* |
1724 | * Try to enable the clock producer. |
1725 | */ |
1726 | retval = clk_prepare_enable(uap->clk); |
1727 | if (retval) |
1728 | return retval; |
1729 | |
1730 | uap->port.uartclk = clk_get_rate(uap->clk); |
1731 | |
1732 | /* Clear pending error and receive interrupts */ |
1733 | pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | |
1734 | UART011_FEIS | UART011_RTIS | UART011_RXIS, |
1735 | uap, REG_ICR); |
1736 | |
1737 | /* |
1738 | * Save the interrupt enable mask, and enable RX interrupts in case |
1739 | * the interrupt is used for NMI entry. |
1740 | */ |
1741 | uap->im = pl011_read(uap, REG_IMSC); |
1742 | pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC); |
1743 | |
1744 | if (dev_get_platdata(uap->port.dev)) { |
1745 | struct amba_pl011_data *plat; |
1746 | |
1747 | plat = dev_get_platdata(uap->port.dev); |
1748 | if (plat->init) |
1749 | plat->init(); |
1750 | } |
1751 | return 0; |
1752 | } |
1753 | |
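| /* |
| * A note for orientation, not a new claim about the hardware: in the |
| * standard register table both REG_LCRH_RX and REG_LCRH_TX map to the |
| * same UART011_LCRH offset, while some vendor tables (the ST variant, |
| * for instance) split them into two registers, in which case both |
| * copies must be kept in sync. |
| */ |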
1754 | static bool pl011_split_lcrh(const struct uart_amba_port *uap) |
1755 | { |
1756 | return pl011_reg_to_offset(uap, REG_LCRH_RX) != |
1757 | pl011_reg_to_offset(uap, REG_LCRH_TX); |
1758 | } |
1759 | |
1760 | static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h) |
1761 | { |
1762 | pl011_write(lcr_h, uap, REG_LCRH_RX); |
1763 | if (pl011_split_lcrh(uap)) { |
1764 | int i; |
1765 | /* |
1766 | * Wait 10 PCLKs before writing the LCRH_TX register; to get |
1767 | * this delay, write a read-only register 10 times. |
1768 | */ |
1769 | for (i = 0; i < 10; ++i) |
1770 | pl011_write(0xff, uap, REG_MIS); |
1771 | pl011_write(lcr_h, uap, REG_LCRH_TX); |
1772 | } |
1773 | } |
1774 | |
1775 | static int pl011_allocate_irq(struct uart_amba_port *uap) |
1776 | { |
1777 | pl011_write(uap->im, uap, REG_IMSC); |
1778 | |
1779 | return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap); |
1780 | } |
1781 | |
1782 | /* |
1783 | * Enable interrupts; only timeouts when using DMA. |
1784 | * If the initial RX DMA job failed, start in interrupt mode |
1785 | * as well. |
1786 | */ |
1787 | static void pl011_enable_interrupts(struct uart_amba_port *uap) |
1788 | { |
1789 | unsigned long flags; |
1790 | unsigned int i; |
1791 | |
1792 | uart_port_lock_irqsave(&uap->port, &flags); |
1793 | |
1794 | /* Clear out any spuriously appearing RX interrupts */ |
1795 | pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); |
1796 | |
1797 | /* |
1798 | * RXIS is asserted only when the RX FIFO transitions from below |
1799 | * to above the trigger threshold. If the RX FIFO is already |
1800 | * full to the threshold this can't happen and RXIS will now be |
1801 | * stuck off. Drain the RX FIFO explicitly to fix this: |
1802 | */ |
1803 | for (i = 0; i < uap->fifosize * 2; ++i) { |
1804 | if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE) |
1805 | break; |
1806 | |
1807 | pl011_read(uap, REG_DR); |
1808 | } |
1809 | |
1810 | uap->im = UART011_RTIM; |
1811 | if (!pl011_dma_rx_running(uap)) |
1812 | uap->im |= UART011_RXIM; |
1813 | pl011_write(uap->im, uap, REG_IMSC); |
1814 | uart_port_unlock_irqrestore(&uap->port, flags); |
1815 | } |
1816 | |
1817 | static void pl011_unthrottle_rx(struct uart_port *port) |
1818 | { |
1819 | struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); |
1820 | unsigned long flags; |
1821 | |
1822 | uart_port_lock_irqsave(&uap->port, &flags); |
1823 | |
1824 | uap->im = UART011_RTIM; |
1825 | if (!pl011_dma_rx_running(uap)) |
1826 | uap->im |= UART011_RXIM; |
1827 | |
1828 | pl011_write(uap->im, uap, REG_IMSC); |
1829 | |
1830 | uart_port_unlock_irqrestore(&uap->port, flags); |
1831 | } |
1832 | |
1833 | static int pl011_startup(struct uart_port *port) |
1834 | { |
1835 | struct uart_amba_port *uap = |
1836 | container_of(port, struct uart_amba_port, port); |
1837 | unsigned int cr; |
1838 | int retval; |
1839 | |
1840 | retval = pl011_hwinit(port); |
1841 | if (retval) |
1842 | goto clk_dis; |
1843 | |
1844 | retval = pl011_allocate_irq(uap); |
1845 | if (retval) |
1846 | goto clk_dis; |
1847 | |
1848 | pl011_write(uap->vendor->ifls, uap, REG_IFLS); |
1849 | |
1850 | uart_port_lock_irq(&uap->port); |
1851 | |
1852 | cr = pl011_read(uap, REG_CR); |
1853 | cr &= UART011_CR_RTS | UART011_CR_DTR; |
1854 | cr |= UART01x_CR_UARTEN | UART011_CR_RXE; |
1855 | |
1856 | if (!(port->rs485.flags & SER_RS485_ENABLED)) |
1857 | cr |= UART011_CR_TXE; |
1858 | |
1859 | pl011_write(cr, uap, REG_CR); |
1860 | |
1861 | uart_port_unlock_irq(&uap->port); |
1862 | |
1863 | /* |
1864 | * Initialise the old status of the modem signals. |
1865 | */ |
1866 | uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; |
1867 | |
1868 | /* Startup DMA */ |
1869 | pl011_dma_startup(uap); |
1870 | |
1871 | pl011_enable_interrupts(uap); |
1872 | |
1873 | return 0; |
1874 | |
1875 | clk_dis: |
1876 | clk_disable_unprepare(uap->clk); |
1877 | return retval; |
1878 | } |
1879 | |
1880 | static int sbsa_uart_startup(struct uart_port *port) |
1881 | { |
1882 | struct uart_amba_port *uap = |
1883 | container_of(port, struct uart_amba_port, port); |
1884 | int retval; |
1885 | |
1886 | retval = pl011_hwinit(port); |
1887 | if (retval) |
1888 | return retval; |
1889 | |
1890 | retval = pl011_allocate_irq(uap); |
1891 | if (retval) |
1892 | return retval; |
1893 | |
1894 | /* The SBSA UART does not support any modem status lines. */ |
1895 | uap->old_status = 0; |
1896 | |
1897 | pl011_enable_interrupts(uap); |
1898 | |
1899 | return 0; |
1900 | } |
1901 | |
1902 | static void pl011_shutdown_channel(struct uart_amba_port *uap, unsigned int lcrh) |
1903 | { |
1904 | unsigned long val; |
1905 | |
1906 | val = pl011_read(uap, lcrh); |
1907 | val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); |
1908 | pl011_write(val, uap, lcrh); |
1909 | } |
1910 | |
1911 | /* |
1912 | * Disable the port. It should not disable RTS and DTR. |
1913 | * The RTS and DTR state should also be preserved, so it can be |
1914 | * restored during startup(). |
1915 | */ |
1916 | static void pl011_disable_uart(struct uart_amba_port *uap) |
1917 | { |
1918 | unsigned int cr; |
1919 | |
1920 | uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); |
1921 | uart_port_lock_irq(&uap->port); |
1922 | cr = pl011_read(uap, REG_CR); |
1923 | cr &= UART011_CR_RTS | UART011_CR_DTR; |
1924 | cr |= UART01x_CR_UARTEN | UART011_CR_TXE; |
1925 | pl011_write(cr, uap, REG_CR); |
1926 | uart_port_unlock_irq(&uap->port); |
1927 | |
1928 | /* |
1929 | * Disable the break condition and the FIFOs. |
1930 | */ |
1931 | pl011_shutdown_channel(uap, REG_LCRH_RX); |
1932 | if (pl011_split_lcrh(uap)) |
1933 | pl011_shutdown_channel(uap, REG_LCRH_TX); |
1934 | } |
1935 | |
1936 | static void pl011_disable_interrupts(struct uart_amba_port *uap) |
1937 | { |
1938 | uart_port_lock_irq(&uap->port); |
1939 | |
1940 | /* Mask all interrupts and clear all pending ones. */ |
1941 | uap->im = 0; |
1942 | pl011_write(uap->im, uap, REG_IMSC); |
1943 | pl011_write(0xffff, uap, REG_ICR); |
1944 | |
1945 | uart_port_unlock_irq(&uap->port); |
1946 | } |
1947 | |
1948 | static void pl011_shutdown(struct uart_port *port) |
1949 | { |
1950 | struct uart_amba_port *uap = |
1951 | container_of(port, struct uart_amba_port, port); |
1952 | |
1953 | pl011_disable_interrupts(uap); |
1954 | |
1955 | pl011_dma_shutdown(uap); |
1956 | |
1957 | if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) |
1958 | pl011_rs485_tx_stop(uap); |
1959 | |
1960 | free_irq(uap->port.irq, uap); |
1961 | |
1962 | pl011_disable_uart(uap); |
1963 | |
1964 | /* |
1965 | * Shut down the clock producer |
1966 | */ |
1967 | clk_disable_unprepare(uap->clk); |
1968 | /* Optionally let pins go into sleep states */ |
1969 | pinctrl_pm_select_sleep_state(port->dev); |
1970 | |
1971 | if (dev_get_platdata(uap->port.dev)) { |
1972 | struct amba_pl011_data *plat; |
1973 | |
1974 | plat = dev_get_platdata(uap->port.dev); |
1975 | if (plat->exit) |
1976 | plat->exit(); |
1977 | } |
1978 | |
1979 | if (uap->port.ops->flush_buffer) |
1980 | uap->port.ops->flush_buffer(port); |
1981 | } |
1982 | |
1983 | static void sbsa_uart_shutdown(struct uart_port *port) |
1984 | { |
1985 | struct uart_amba_port *uap = |
1986 | container_of(port, struct uart_amba_port, port); |
1987 | |
1988 | pl011_disable_interrupts(uap); |
1989 | |
1990 | free_irq(uap->port.irq, uap); |
1991 | |
1992 | if (uap->port.ops->flush_buffer) |
1993 | uap->port.ops->flush_buffer(port); |
1994 | } |
1995 | |
1996 | static void |
1997 | pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios) |
1998 | { |
1999 | port->read_status_mask = UART011_DR_OE | 255; |
2000 | if (termios->c_iflag & INPCK) |
2001 | port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; |
2002 | if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) |
2003 | port->read_status_mask |= UART011_DR_BE; |
2004 | |
2005 | /* |
2006 | * Characters to ignore |
2007 | */ |
2008 | port->ignore_status_mask = 0; |
2009 | if (termios->c_iflag & IGNPAR) |
2010 | port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE; |
2011 | if (termios->c_iflag & IGNBRK) { |
2012 | port->ignore_status_mask |= UART011_DR_BE; |
2013 | /* |
2014 | * If we're ignoring parity and break indicators, |
2015 | * ignore overruns too (for real raw support). |
2016 | */ |
2017 | if (termios->c_iflag & IGNPAR) |
2018 | port->ignore_status_mask |= UART011_DR_OE; |
2019 | } |
2020 | |
2021 | /* |
2022 | * Ignore all characters if CREAD is not set. |
2023 | */ |
2024 | if ((termios->c_cflag & CREAD) == 0) |
2025 | port->ignore_status_mask |= UART_DUMMY_DR_RX; |
2026 | } |
2027 | |
2028 | static void |
2029 | pl011_set_termios(struct uart_port *port, struct ktermios *termios, |
2030 | const struct ktermios *old) |
2031 | { |
2032 | struct uart_amba_port *uap = |
2033 | container_of(port, struct uart_amba_port, port); |
2034 | unsigned int lcr_h, old_cr; |
2035 | unsigned long flags; |
2036 | unsigned int baud, quot, clkdiv; |
2037 | unsigned int bits; |
2038 | |
2039 | if (uap->vendor->oversampling) |
2040 | clkdiv = 8; |
2041 | else |
2042 | clkdiv = 16; |
2043 | |
2044 | /* |
2045 | * Ask the core to calculate the divisor for us. |
2046 | */ |
2047 | baud = uart_get_baud_rate(port, termios, old, 0, |
2048 | port->uartclk / clkdiv); |
2049 | #ifdef CONFIG_DMA_ENGINE |
2050 | /* |
2051 | * Adjust RX DMA polling rate with baud rate if not specified. |
2052 | */ |
2053 | if (uap->dmarx.auto_poll_rate) |
2054 | uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); |
2055 | #endif |
2056 | |
2057 | if (baud > port->uartclk / 16) |
2058 | quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); |
2059 | else |
2060 | quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); |
2061 | |
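| /* |
| * Worked example for the divisor just computed, with illustrative |
| * values that are assumed rather than taken from this driver: with |
| * uartclk = 24000000 and baud = 115200 (no oversampling), |
| * quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833 = 13 * 64 + 1, |
| * which is later split into IBRD = 833 >> 6 = 13 and |
| * FBRD = 833 & 0x3f = 1, i.e. a 16.6 fixed-point divider of ~13.02. |
| */ |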
2062 | switch (termios->c_cflag & CSIZE) { |
2063 | case CS5: |
2064 | lcr_h = UART01x_LCRH_WLEN_5; |
2065 | break; |
2066 | case CS6: |
2067 | lcr_h = UART01x_LCRH_WLEN_6; |
2068 | break; |
2069 | case CS7: |
2070 | lcr_h = UART01x_LCRH_WLEN_7; |
2071 | break; |
2072 | default: // CS8 |
2073 | lcr_h = UART01x_LCRH_WLEN_8; |
2074 | break; |
2075 | } |
2076 | if (termios->c_cflag & CSTOPB) |
2077 | lcr_h |= UART01x_LCRH_STP2; |
2078 | if (termios->c_cflag & PARENB) { |
2079 | lcr_h |= UART01x_LCRH_PEN; |
2080 | if (!(termios->c_cflag & PARODD)) |
2081 | lcr_h |= UART01x_LCRH_EPS; |
2082 | if (termios->c_cflag & CMSPAR) |
2083 | lcr_h |= UART011_LCRH_SPS; |
2084 | } |
2085 | if (uap->fifosize > 1) |
2086 | lcr_h |= UART01x_LCRH_FEN; |
2087 | |
2088 | bits = tty_get_frame_size(termios->c_cflag); |
2089 | |
2090 | uart_port_lock_irqsave(port, &flags); |
2091 | |
2092 | /* |
2093 | * Update the per-port timeout. |
2094 | */ |
2095 | uart_update_timeout(port, termios->c_cflag, baud); |
2096 | |
2097 | /* |
2098 | * Calculate the approximated time it takes to transmit one character |
2099 | * with the given baud rate. We use this as the poll interval when we |
2100 | * wait for the tx queue to empty. |
2101 | */ |
2102 | uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); |
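| /* |
| * With assumed example numbers: an 8N1 frame is 10 bits, so at |
| * 115200 baud the interval computed above is |
| * DIV_ROUND_UP(10 * 1000 * 1000, 115200) = 87 us per character. |
| */ |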
2103 | |
2104 | pl011_setup_status_masks(port, termios); |
2105 | |
2106 | if (UART_ENABLE_MS(port, termios->c_cflag)) |
2107 | pl011_enable_ms(port); |
2108 | |
2109 | if (port->rs485.flags & SER_RS485_ENABLED) |
2110 | termios->c_cflag &= ~CRTSCTS; |
2111 | |
2112 | old_cr = pl011_read(uap, REG_CR); |
2113 | |
2114 | if (termios->c_cflag & CRTSCTS) { |
2115 | if (old_cr & UART011_CR_RTS) |
2116 | old_cr |= UART011_CR_RTSEN; |
2117 | |
2118 | old_cr |= UART011_CR_CTSEN; |
2119 | port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; |
2120 | } else { |
2121 | old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN); |
2122 | port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); |
2123 | } |
2124 | |
2125 | if (uap->vendor->oversampling) { |
2126 | if (baud > port->uartclk / 16) |
2127 | old_cr |= ST_UART011_CR_OVSFACT; |
2128 | else |
2129 | old_cr &= ~ST_UART011_CR_OVSFACT; |
2130 | } |
2131 | |
2132 | /* |
2133 | * Workaround for the ST Micro oversampling variants to |
2134 | * increase the bitrate slightly, by lowering the divisor, |
2135 | * to avoid delayed sampling of start bit at high speeds, |
2136 | * else we see data corruption. |
2137 | */ |
2138 | if (uap->vendor->oversampling) { |
2139 | if (baud >= 3000000 && baud < 3250000 && quot > 1) |
2140 | quot -= 1; |
2141 | else if (baud > 3250000 && quot > 2) |
2142 | quot -= 2; |
2143 | } |
2144 | /* Set baud rate */ |
2145 | pl011_write(quot & 0x3f, uap, REG_FBRD); |
2146 | pl011_write(quot >> 6, uap, REG_IBRD); |
2147 | |
2148 | /* |
2149 | * ----------v----------v----------v----------v----- |
2150 | * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER |
2151 | * REG_FBRD & REG_IBRD. |
2152 | * ----------^----------^----------^----------^----- |
2153 | */ |
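| /* |
| * Background, taken from the ARM PL011 TRM rather than from this |
| * driver: IBRD, FBRD and LCRH update one internal register, so a new |
| * divisor only takes effect once LCRH is written. |
| */ |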
2154 | pl011_write_lcr_h(uap, lcr_h); |
2155 | |
2156 | /* |
2157 | * Receive was disabled by pl011_disable_uart() during shutdown. |
2158 | * Re-enable receive here in case the tty_driver returned by |
2159 | * tty_find_polling_driver() is used after a port shutdown. |
2160 | */ |
2161 | old_cr |= UART011_CR_RXE; |
2162 | pl011_write(old_cr, uap, REG_CR); |
2163 | |
2164 | uart_port_unlock_irqrestore(port, flags); |
2165 | } |
2166 | |
2167 | static void |
2168 | sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios, |
2169 | const struct ktermios *old) |
2170 | { |
2171 | struct uart_amba_port *uap = |
2172 | container_of(port, struct uart_amba_port, port); |
2173 | unsigned long flags; |
2174 | |
2175 | tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud); |
2176 | |
2177 | /* The SBSA UART only supports 8n1 without hardware flow control. */ |
2178 | termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD); |
2179 | termios->c_cflag &= ~(CMSPAR | CRTSCTS); |
2180 | termios->c_cflag |= CS8 | CLOCAL; |
2181 | |
2182 | uart_port_lock_irqsave(port, &flags); |
2183 | uart_update_timeout(port, CS8, uap->fixed_baud); |
2184 | pl011_setup_status_masks(port, termios); |
2185 | uart_port_unlock_irqrestore(port, flags); |
2186 | } |
2187 | |
2188 | static const char *pl011_type(struct uart_port *port) |
2189 | { |
2190 | struct uart_amba_port *uap = |
2191 | container_of(port, struct uart_amba_port, port); |
2192 | return uap->port.type == PORT_AMBA ? uap->type : NULL; |
2193 | } |
2194 | |
2195 | /* |
2196 | * Configure/autoconfigure the port. |
2197 | */ |
2198 | static void pl011_config_port(struct uart_port *port, int flags) |
2199 | { |
2200 | if (flags & UART_CONFIG_TYPE) |
2201 | port->type = PORT_AMBA; |
2202 | } |
2203 | |
2204 | /* |
2205 | * verify the new serial_struct (for TIOCSSERIAL). |
2206 | */ |
2207 | static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) |
2208 | { |
2209 | int ret = 0; |
2210 | |
2211 | if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) |
2212 | ret = -EINVAL; |
2213 | if (ser->irq < 0 || ser->irq >= nr_irqs) |
2214 | ret = -EINVAL; |
2215 | if (ser->baud_base < 9600) |
2216 | ret = -EINVAL; |
2217 | if (port->mapbase != (unsigned long)ser->iomem_base) |
2218 | ret = -EINVAL; |
2219 | return ret; |
2220 | } |
2221 | |
2222 | static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios, |
2223 | struct serial_rs485 *rs485) |
2224 | { |
2225 | struct uart_amba_port *uap = |
2226 | container_of(port, struct uart_amba_port, port); |
2227 | |
2228 | if (port->rs485.flags & SER_RS485_ENABLED) |
2229 | pl011_rs485_tx_stop(uap); |
2230 | |
2231 | /* Make sure auto RTS is disabled */ |
2232 | if (rs485->flags & SER_RS485_ENABLED) { |
2233 | u32 cr = pl011_read(uap, REG_CR); |
2234 | |
2235 | cr &= ~UART011_CR_RTSEN; |
2236 | pl011_write(cr, uap, REG_CR); |
2237 | port->status &= ~UPSTAT_AUTORTS; |
2238 | } |
2239 | |
2240 | return 0; |
2241 | } |
2242 | |
2243 | static const struct uart_ops amba_pl011_pops = { |
2244 | .tx_empty = pl011_tx_empty, |
2245 | .set_mctrl = pl011_set_mctrl, |
2246 | .get_mctrl = pl011_get_mctrl, |
2247 | .stop_tx = pl011_stop_tx, |
2248 | .start_tx = pl011_start_tx, |
2249 | .stop_rx = pl011_stop_rx, |
2250 | .throttle = pl011_throttle_rx, |
2251 | .unthrottle = pl011_unthrottle_rx, |
2252 | .enable_ms = pl011_enable_ms, |
2253 | .break_ctl = pl011_break_ctl, |
2254 | .startup = pl011_startup, |
2255 | .shutdown = pl011_shutdown, |
2256 | .flush_buffer = pl011_dma_flush_buffer, |
2257 | .set_termios = pl011_set_termios, |
2258 | .type = pl011_type, |
2259 | .config_port = pl011_config_port, |
2260 | .verify_port = pl011_verify_port, |
2261 | #ifdef CONFIG_CONSOLE_POLL |
2262 | .poll_init = pl011_hwinit, |
2263 | .poll_get_char = pl011_get_poll_char, |
2264 | .poll_put_char = pl011_put_poll_char, |
2265 | #endif |
2266 | }; |
2267 | |
2268 | static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) |
2269 | { |
2270 | } |
2271 | |
2272 | static unsigned int sbsa_uart_get_mctrl(struct uart_port *port) |
2273 | { |
2274 | return 0; |
2275 | } |
2276 | |
2277 | static const struct uart_ops sbsa_uart_pops = { |
2278 | .tx_empty = pl011_tx_empty, |
2279 | .set_mctrl = sbsa_uart_set_mctrl, |
2280 | .get_mctrl = sbsa_uart_get_mctrl, |
2281 | .stop_tx = pl011_stop_tx, |
2282 | .start_tx = pl011_start_tx, |
2283 | .stop_rx = pl011_stop_rx, |
2284 | .startup = sbsa_uart_startup, |
2285 | .shutdown = sbsa_uart_shutdown, |
2286 | .set_termios = sbsa_uart_set_termios, |
2287 | .type = pl011_type, |
2288 | .config_port = pl011_config_port, |
2289 | .verify_port = pl011_verify_port, |
2290 | #ifdef CONFIG_CONSOLE_POLL |
2291 | .poll_init = pl011_hwinit, |
2292 | .poll_get_char = pl011_get_poll_char, |
2293 | .poll_put_char = pl011_put_poll_char, |
2294 | #endif |
2295 | }; |
2296 | |
2297 | static struct uart_amba_port *amba_ports[UART_NR]; |
2298 | |
2299 | #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE |
2300 | |
2301 | static void pl011_console_putchar(struct uart_port *port, unsigned char ch) |
2302 | { |
2303 | struct uart_amba_port *uap = |
2304 | container_of(port, struct uart_amba_port, port); |
2305 | |
2306 | while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) |
2307 | cpu_relax(); |
2308 | pl011_write(ch, uap, REG_DR); |
2309 | } |
2310 | |
2311 | static void |
2312 | pl011_console_write(struct console *co, const char *s, unsigned int count) |
2313 | { |
2314 | struct uart_amba_port *uap = amba_ports[co->index]; |
2315 | unsigned int old_cr = 0, new_cr; |
2316 | unsigned long flags; |
2317 | int locked = 1; |
2318 | |
2319 | clk_enable(uap->clk); |
2320 | |
2321 | if (oops_in_progress) |
2322 | locked = uart_port_trylock_irqsave(&uap->port, &flags); |
2323 | else |
2324 | uart_port_lock_irqsave(&uap->port, &flags); |
2325 | |
2326 | /* |
2327 | * First save the CR then disable the interrupts |
2328 | */ |
2329 | if (!uap->vendor->always_enabled) { |
2330 | old_cr = pl011_read(uap, REG_CR); |
2331 | new_cr = old_cr & ~UART011_CR_CTSEN; |
2332 | new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; |
2333 | pl011_write(new_cr, uap, REG_CR); |
2334 | } |
2335 | |
2336 | uart_console_write(&uap->port, s, count, pl011_console_putchar); |
2337 | |
2338 | /* |
2339 | * Finally, wait for transmitter to become empty and restore the |
2340 | * TCR. Allow feature register bits to be inverted to work around |
2341 | * errata. |
2342 | */ |
2343 | while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) |
2344 | & uap->vendor->fr_busy) |
2345 | cpu_relax(); |
2346 | if (!uap->vendor->always_enabled) |
2347 | pl011_write(old_cr, uap, REG_CR); |
2348 | |
2349 | if (locked) |
2350 | uart_port_unlock_irqrestore(&uap->port, flags); |
2351 | |
2352 | clk_disable(uap->clk); |
2353 | } |
2354 | |
2355 | static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, |
2356 | int *parity, int *bits) |
2357 | { |
2358 | unsigned int lcr_h, ibrd, fbrd; |
2359 | |
2360 | if (!(pl011_read(uap, REG_CR) & UART01x_CR_UARTEN)) |
2361 | return; |
2362 | |
2363 | lcr_h = pl011_read(uap, REG_LCRH_TX); |
2364 | |
2365 | *parity = 'n'; |
2366 | if (lcr_h & UART01x_LCRH_PEN) { |
2367 | if (lcr_h & UART01x_LCRH_EPS) |
2368 | *parity = 'e'; |
2369 | else |
2370 | *parity = 'o'; |
2371 | } |
2372 | |
2373 | if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7) |
2374 | *bits = 7; |
2375 | else |
2376 | *bits = 8; |
2377 | |
2378 | ibrd = pl011_read(uap, REG_IBRD); |
2379 | fbrd = pl011_read(uap, REG_FBRD); |
2380 | |
2381 | *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); |
2382 | |
2383 | if (uap->vendor->oversampling && |
2384 | (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT)) |
2385 | *baud *= 2; |
2386 | } |
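| /* |
| * Sketch of the reconstruction above, with assumed example values: |
| * uartclk = 24000000, IBRD = 13 and FBRD = 1 give |
| * 24000000 * 4 / (64 * 13 + 1) = 96000000 / 833 ~ 115246, i.e. the |
| * closest representable rate to 115200 baud. |
| */ |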
2387 | |
2388 | static int pl011_console_setup(struct console *co, char *options) |
2389 | { |
2390 | struct uart_amba_port *uap; |
2391 | int baud = 38400; |
2392 | int bits = 8; |
2393 | int parity = 'n'; |
2394 | int flow = 'n'; |
2395 | int ret; |
2396 | |
2397 | /* |
2398 | * Check whether an invalid uart number has been specified, and |
2399 | * if so, search for the first available port that does have |
2400 | * console support. |
2401 | */ |
2402 | if (co->index >= UART_NR) |
2403 | co->index = 0; |
2404 | uap = amba_ports[co->index]; |
2405 | if (!uap) |
2406 | return -ENODEV; |
2407 | |
2408 | /* Allow pins to be muxed in and configured */ |
2409 | pinctrl_pm_select_default_state(uap->port.dev); |
2410 | |
2411 | ret = clk_prepare(uap->clk); |
2412 | if (ret) |
2413 | return ret; |
2414 | |
2415 | if (dev_get_platdata(uap->port.dev)) { |
2416 | struct amba_pl011_data *plat; |
2417 | |
2418 | plat = dev_get_platdata(uap->port.dev); |
2419 | if (plat->init) |
2420 | plat->init(); |
2421 | } |
2422 | |
2423 | uap->port.uartclk = clk_get_rate(uap->clk); |
2424 | |
2425 | if (uap->vendor->fixed_options) { |
2426 | baud = uap->fixed_baud; |
2427 | } else { |
2428 | if (options) |
2429 | uart_parse_options(options, |
2430 | &baud, &parity, &bits, &flow); |
2431 | else |
2432 | pl011_console_get_options(uap, &baud, &parity, &bits); |
2433 | } |
2434 | |
2435 | return uart_set_options(&uap->port, co, baud, parity, bits, flow); |
2436 | } |
2437 | |
2438 | /** |
2439 | * pl011_console_match - non-standard console matching |
2440 | * @co: registering console |
2441 | * @name: name from console command line |
2442 | * @idx: index from console command line |
2443 | * @options: ptr to option string from console command line |
2444 | * |
2445 | * Only attempts to match console command lines of the form: |
2446 | * console=pl011,mmio|mmio32,<addr>[,<options>] |
2447 | * console=pl011,0x<addr>[,<options>] |
2448 | * This form is used to register an initial earlycon boot console and |
2449 | * replace it with the amba_console at pl011 driver init. |
2450 | * |
2451 | * Performs console setup for a match (as required by interface) |
2452 | * If no <options> are specified, then assume the h/w is already setup. |
2453 | * |
2454 | * Returns 0 if console matches; otherwise non-zero to use default matching |
2455 | */ |
2456 | static int pl011_console_match(struct console *co, char *name, int idx, |
2457 | char *options) |
2458 | { |
2459 | unsigned char iotype; |
2460 | resource_size_t addr; |
2461 | int i; |
2462 | |
2463 | /* |
2464 | * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum |
2465 | * have a distinct console name, so make sure we check for that. |
2466 | * The actual implementation of the erratum occurs in the probe |
2467 | * function. |
2468 | */ |
2469 | if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0)) |
2470 | return -ENODEV; |
2471 | |
2472 | if (uart_parse_earlycon(options, &iotype, &addr, &options)) |
2473 | return -ENODEV; |
2474 | |
2475 | if (iotype != UPIO_MEM && iotype != UPIO_MEM32) |
2476 | return -ENODEV; |
2477 | |
2478 | /* try to match the port specified on the command line */ |
2479 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { |
2480 | struct uart_port *port; |
2481 | |
2482 | if (!amba_ports[i]) |
2483 | continue; |
2484 | |
2485 | port = &amba_ports[i]->port; |
2486 | |
2487 | if (port->mapbase != addr) |
2488 | continue; |
2489 | |
2490 | co->index = i; |
2491 | port->cons = co; |
2492 | return pl011_console_setup(co, options); |
2493 | } |
2494 | |
2495 | return -ENODEV; |
2496 | } |
2497 | |
2498 | static struct uart_driver amba_reg; |
2499 | static struct console amba_console = { |
2500 | .name = "ttyAMA", |
2501 | .write = pl011_console_write, |
2502 | .device = uart_console_device, |
2503 | .setup = pl011_console_setup, |
2504 | .match = pl011_console_match, |
2505 | .flags = CON_PRINTBUFFER | CON_ANYTIME, |
2506 | .index = -1, |
2507 | .data = &amba_reg, |
2508 | }; |
2509 | |
2510 | #define AMBA_CONSOLE (&amba_console) |
2511 | |
2512 | static void qdf2400_e44_putc(struct uart_port *port, unsigned char c) |
2513 | { |
2514 | while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) |
2515 | cpu_relax(); |
2516 | writel(c, port->membase + UART01x_DR); |
2517 | while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE)) |
2518 | cpu_relax(); |
2519 | } |
2520 | |
2521 | static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned int n) |
2522 | { |
2523 | struct earlycon_device *dev = con->data; |
2524 | |
2525 | uart_console_write(&dev->port, s, n, qdf2400_e44_putc); |
2526 | } |
2527 | |
2528 | static void pl011_putc(struct uart_port *port, unsigned char c) |
2529 | { |
2530 | while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) |
2531 | cpu_relax(); |
2532 | if (port->iotype == UPIO_MEM32) |
2533 | writel(c, port->membase + UART01x_DR); |
2534 | else |
2535 | writeb(c, port->membase + UART01x_DR); |
2536 | while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY) |
2537 | cpu_relax(); |
2538 | } |
2539 | |
2540 | static void pl011_early_write(struct console *con, const char *s, unsigned int n) |
2541 | { |
2542 | struct earlycon_device *dev = con->data; |
2543 | |
2544 | uart_console_write(&dev->port, s, n, pl011_putc); |
2545 | } |
2546 | |
2547 | #ifdef CONFIG_CONSOLE_POLL |
2548 | static int pl011_getc(struct uart_port *port) |
2549 | { |
2550 | if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE) |
2551 | return NO_POLL_CHAR; |
2552 | |
2553 | if (port->iotype == UPIO_MEM32) |
2554 | return readl(port->membase + UART01x_DR); |
2555 | else |
2556 | return readb(port->membase + UART01x_DR); |
2557 | } |
2558 | |
2559 | static int pl011_early_read(struct console *con, char *s, unsigned int n) |
2560 | { |
2561 | struct earlycon_device *dev = con->data; |
2562 | int ch, num_read = 0; |
2563 | |
2564 | while (num_read < n) { |
2565 | ch = pl011_getc(&dev->port); |
2566 | if (ch == NO_POLL_CHAR) |
2567 | break; |
2568 | |
2569 | s[num_read++] = ch; |
2570 | } |
2571 | |
2572 | return num_read; |
2573 | } |
2574 | #else |
2575 | #define pl011_early_read NULL |
2576 | #endif |
2577 | |
2578 | /* |
2579 | * On non-ACPI systems, earlycon is enabled by specifying |
2580 | * "earlycon=pl011,<address>" on the kernel command line. |
2581 | * |
2582 | * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table, |
2583 | * by specifying only "earlycon" on the command line. Because it requires |
2584 | * SPCR, the console starts after ACPI is parsed, which is later than a |
2585 | * traditional early console. |
2586 | * |
2587 | * To get the traditional early console that starts before ACPI is parsed, |
2588 | * specify the full "earlycon=pl011,<address>" option. |
2589 | */ |
2590 | static int __init pl011_early_console_setup(struct earlycon_device *device, |
2591 | const char *opt) |
2592 | { |
2593 | if (!device->port.membase) |
2594 | return -ENODEV; |
2595 | |
2596 | device->con->write = pl011_early_write; |
2597 | device->con->read = pl011_early_read; |
2598 | |
2599 | return 0; |
2600 | } |
2601 | |
2602 | OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); |
2603 | |
2604 | OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); |
2605 | |
2606 | /* |
2607 | * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by |
2608 | * Erratum 44, traditional earlycon can be enabled by specifying |
2609 | * "earlycon=qdf2400_e44,<address>". Any options are ignored. |
2610 | * |
2611 | * Alternatively, you can just specify "earlycon", and the early console |
2612 | * will be enabled with the information from the SPCR table. In this |
2613 | * case, the SPCR code will detect the need for the E44 work-around, |
2614 | * and set the console name to "qdf2400_e44". |
2615 | */ |
2616 | static int __init |
2617 | qdf2400_e44_early_console_setup(struct earlycon_device *device, |
2618 | const char *opt) |
2619 | { |
2620 | if (!device->port.membase) |
2621 | return -ENODEV; |
2622 | |
2623 | device->con->write = qdf2400_e44_early_write; |
2624 | return 0; |
2625 | } |
2626 | |
2627 | EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup); |
2628 | |
2629 | #else |
2630 | #define AMBA_CONSOLE NULL |
2631 | #endif |
2632 | |
2633 | static struct uart_driver amba_reg = { |
2634 | .owner = THIS_MODULE, |
2635 | .driver_name = "ttyAMA", |
2636 | .dev_name = "ttyAMA", |
2637 | .major = SERIAL_AMBA_MAJOR, |
2638 | .minor = SERIAL_AMBA_MINOR, |
2639 | .nr = UART_NR, |
2640 | .cons = AMBA_CONSOLE, |
2641 | }; |
2642 | |
2643 | static int pl011_probe_dt_alias(int index, struct device *dev) |
2644 | { |
2645 | struct device_node *np; |
2646 | static bool seen_dev_with_alias; |
2647 | static bool seen_dev_without_alias; |
2648 | int ret = index; |
2649 | |
2650 | if (!IS_ENABLED(CONFIG_OF)) |
2651 | return ret; |
2652 | |
2653 | np = dev->of_node; |
2654 | if (!np) |
2655 | return ret; |
2656 | |
2657 | ret = of_alias_get_id(np, "serial"); |
2658 | if (ret < 0) { |
2659 | seen_dev_without_alias = true; |
2660 | ret = index; |
2661 | } else { |
2662 | seen_dev_with_alias = true; |
2663 | if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret]) { |
2664 | dev_warn(dev, "requested serial port %d not available.\n", ret); |
2665 | ret = index; |
2666 | } |
2667 | } |
2668 | |
2669 | if (seen_dev_with_alias && seen_dev_without_alias) |
2670 | dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n"); |
2671 | |
2672 | return ret; |
2673 | } |
2674 | |
2675 | /* unregisters the driver also if no more ports are left */ |
2676 | static void pl011_unregister_port(struct uart_amba_port *uap) |
2677 | { |
2678 | int i; |
2679 | bool busy = false; |
2680 | |
2681 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { |
2682 | if (amba_ports[i] == uap) |
2683 | amba_ports[i] = NULL; |
2684 | else if (amba_ports[i]) |
2685 | busy = true; |
2686 | } |
2687 | pl011_dma_remove(uap); |
2688 | if (!busy) |
2689 | uart_unregister_driver(&amba_reg); |
2690 | } |
2691 | |
2692 | static int pl011_find_free_port(void) |
2693 | { |
2694 | int i; |
2695 | |
2696 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) |
2697 | if (!amba_ports[i]) |
2698 | return i; |
2699 | |
2700 | return -EBUSY; |
2701 | } |
2702 | |
2703 | static int pl011_get_rs485_mode(struct uart_amba_port *uap) |
2704 | { |
2705 | struct uart_port *port = &uap->port; |
2706 | int ret; |
2707 | |
2708 | ret = uart_get_rs485_mode(port); |
2709 | if (ret) |
2710 | return ret; |
2711 | |
2712 | return 0; |
2713 | } |
2714 | |
2715 | static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, |
2716 | struct resource *mmiobase, int index) |
2717 | { |
2718 | void __iomem *base; |
2719 | int ret; |
2720 | |
2721 | base = devm_ioremap_resource(dev, mmiobase); |
2722 | if (IS_ERR(base)) |
2723 | return PTR_ERR(base); |
2724 | |
2725 | index = pl011_probe_dt_alias(index, dev); |
2726 | |
2727 | uap->port.dev = dev; |
2728 | uap->port.mapbase = mmiobase->start; |
2729 | uap->port.membase = base; |
2730 | uap->port.fifosize = uap->fifosize; |
2731 | uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE); |
2732 | uap->port.flags = UPF_BOOT_AUTOCONF; |
2733 | uap->port.line = index; |
2734 | |
2735 | ret = pl011_get_rs485_mode(uap); |
2736 | if (ret) |
2737 | return ret; |
2738 | |
2739 | amba_ports[index] = uap; |
2740 | |
2741 | return 0; |
2742 | } |
2743 | |
2744 | static int pl011_register_port(struct uart_amba_port *uap) |
2745 | { |
2746 | int ret, i; |
2747 | |
2748 | /* Ensure interrupts from this UART are masked and cleared */ |
2749 | pl011_write(0, uap, REG_IMSC); |
2750 | pl011_write(0xffff, uap, REG_ICR); |
2751 | |
2752 | if (!amba_reg.state) { |
2753 | ret = uart_register_driver(&amba_reg); |
2754 | if (ret < 0) { |
2755 | dev_err(uap->port.dev, |
2756 | "Failed to register AMBA-PL011 driver\n"); |
2757 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) |
2758 | if (amba_ports[i] == uap) |
2759 | amba_ports[i] = NULL; |
2760 | return ret; |
2761 | } |
2762 | } |
2763 | |
2764 | ret = uart_add_one_port(&amba_reg, &uap->port); |
2765 | if (ret) |
2766 | pl011_unregister_port(uap); |
2767 | |
2768 | return ret; |
2769 | } |
2770 | |
2771 | static const struct serial_rs485 pl011_rs485_supported = { |
2772 | .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | |
2773 | SER_RS485_RX_DURING_TX, |
2774 | .delay_rts_before_send = 1, |
2775 | .delay_rts_after_send = 1, |
2776 | }; |
2777 | |
2778 | static int pl011_probe(struct amba_device *dev, const struct amba_id *id) |
2779 | { |
2780 | struct uart_amba_port *uap; |
2781 | struct vendor_data *vendor = id->data; |
2782 | int portnr, ret; |
2783 | u32 val; |
2784 | |
2785 | portnr = pl011_find_free_port(); |
2786 | if (portnr < 0) |
2787 | return portnr; |
2788 | |
2789 | uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port), |
2790 | GFP_KERNEL); |
2791 | if (!uap) |
2792 | return -ENOMEM; |
2793 | |
2794 | uap->clk = devm_clk_get(&dev->dev, NULL); |
2795 | if (IS_ERR(uap->clk)) |
2796 | return PTR_ERR(uap->clk); |
2797 | |
2798 | uap->reg_offset = vendor->reg_offset; |
2799 | uap->vendor = vendor; |
2800 | uap->fifosize = vendor->get_fifosize(dev); |
2801 | uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; |
2802 | uap->port.irq = dev->irq[0]; |
2803 | uap->port.ops = &amba_pl011_pops; |
2804 | uap->port.rs485_config = pl011_rs485_config; |
2805 | uap->port.rs485_supported = pl011_rs485_supported; |
2806 | snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); |
2807 | |
2808 | if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) { |
2809 | switch (val) { |
2810 | case 1: |
2811 | uap->port.iotype = UPIO_MEM; |
2812 | break; |
2813 | case 4: |
2814 | uap->port.iotype = UPIO_MEM32; |
2815 | break; |
2816 | default: |
2817 | dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n", |
2818 | val); |
2819 | return -EINVAL; |
2820 | } |
2821 | } |
2822 | |
2823 | ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr); |
2824 | if (ret) |
2825 | return ret; |
2826 | |
2827 | amba_set_drvdata(dev, uap); |
2828 | |
2829 | return pl011_register_port(uap); |
2830 | } |
2831 | |
2832 | static void pl011_remove(struct amba_device *dev) |
2833 | { |
2834 | struct uart_amba_port *uap = amba_get_drvdata(dev); |
2835 | |
2836 | uart_remove_one_port(&amba_reg, &uap->port); |
2837 | pl011_unregister_port(uap); |
2838 | } |
2839 | |
2840 | #ifdef CONFIG_PM_SLEEP |
2841 | static int pl011_suspend(struct device *dev) |
2842 | { |
2843 | struct uart_amba_port *uap = dev_get_drvdata(dev); |
2844 | |
2845 | if (!uap) |
2846 | return -EINVAL; |
2847 | |
2848 | return uart_suspend_port(reg: &amba_reg, port: &uap->port); |
2849 | } |
2850 | |
2851 | static int pl011_resume(struct device *dev) |
2852 | { |
2853 | struct uart_amba_port *uap = dev_get_drvdata(dev); |
2854 | |
2855 | if (!uap) |
2856 | return -EINVAL; |
2857 | |
2858 | return uart_resume_port(&amba_reg, &uap->port); |
2859 | } |
2860 | #endif |
2861 | |
2862 | static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume); |
2863 | |
2864 | #ifdef CONFIG_ACPI_SPCR_TABLE |
2865 | static void qpdf2400_erratum44_workaround(struct device *dev, |
2866 | struct uart_amba_port *uap) |
2867 | { |
2868 | if (!qdf2400_e44_present) |
2869 | return; |
2870 | |
2871 | dev_info(dev, "working around QDF2400 SoC erratum 44\n"); |
2872 | uap->vendor = &vendor_qdt_qdf2400_e44; |
2873 | } |
2874 | #else |
2875 | static void qpdf2400_erratum44_workaround(struct device *dev, |
2876 | struct uart_amba_port *uap) |
2877 | { /* empty */ } |
2878 | #endif |
2879 | |
2880 | static int sbsa_uart_probe(struct platform_device *pdev) |
2881 | { |
2882 | struct uart_amba_port *uap; |
2883 | struct resource *r; |
2884 | int portnr, ret; |
2885 | int baudrate; |
2886 | |
2887 | /* |
2888 | * Check the mandatory baud rate parameter in the DT node early |
2889 | * so that we can easily exit with the error. |
2890 | */ |
2891 | if (pdev->dev.of_node) { |
2892 | struct device_node *np = pdev->dev.of_node; |
2893 | |
2894 | ret = of_property_read_u32(np, "current-speed", &baudrate); |
2895 | if (ret) |
2896 | return ret; |
2897 | } else { |
2898 | baudrate = 115200; |
2899 | } |
2900 | |
2901 | portnr = pl011_find_free_port(); |
2902 | if (portnr < 0) |
2903 | return portnr; |
2904 | |
2905 | uap = devm_kzalloc(dev: &pdev->dev, size: sizeof(struct uart_amba_port), |
2906 | GFP_KERNEL); |
2907 | if (!uap) |
2908 | return -ENOMEM; |
2909 | |
2910 | ret = platform_get_irq(pdev, 0); |
2911 | if (ret < 0) |
2912 | return ret; |
2913 | uap->port.irq = ret; |
2914 | |
2915 | uap->vendor = &vendor_sbsa; |
2916 | qpdf2400_erratum44_workaround(&pdev->dev, uap); |
2917 | |
2918 | uap->reg_offset = uap->vendor->reg_offset; |
2919 | uap->fifosize = 32; |
2920 | uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; |
2921 | uap->port.ops = &sbsa_uart_pops; |
2922 | uap->fixed_baud = baudrate; |
2923 | |
2924 | snprintf(uap->type, sizeof(uap->type), "SBSA"); |
2925 | |
2926 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2927 | |
2928 | ret = pl011_setup_port(&pdev->dev, uap, r, portnr); |
2929 | if (ret) |
2930 | return ret; |
2931 | |
2932 | platform_set_drvdata(pdev, uap); |
2933 | |
2934 | return pl011_register_port(uap); |
2935 | } |
2936 | |
2937 | static void sbsa_uart_remove(struct platform_device *pdev) |
2938 | { |
2939 | struct uart_amba_port *uap = platform_get_drvdata(pdev); |
2940 | |
2941 | uart_remove_one_port(&amba_reg, &uap->port); |
2942 | pl011_unregister_port(uap); |
2943 | } |
2944 | |
2945 | static const struct of_device_id sbsa_uart_of_match[] = { |
2946 | { .compatible = "arm,sbsa-uart", }, |
2947 | {}, |
2948 | }; |
2949 | MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); |
2950 | |
2951 | static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = { |
2952 | { "ARMH0011", 0 }, |
2953 | { "ARMHB000", 0 }, |
2954 | {}, |
2955 | }; |
2956 | MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); |
2957 | |
2958 | static struct platform_driver arm_sbsa_uart_platform_driver = { |
2959 | .probe = sbsa_uart_probe, |
2960 | .remove_new = sbsa_uart_remove, |
2961 | .driver = { |
2962 | .name = "sbsa-uart", |
2963 | .pm = &pl011_dev_pm_ops, |
2964 | .of_match_table = of_match_ptr(sbsa_uart_of_match), |
2965 | .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match), |
2966 | .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), |
2967 | }, |
2968 | }; |
2969 | |
2970 | static const struct amba_id pl011_ids[] = { |
2971 | { |
2972 | .id = 0x00041011, |
2973 | .mask = 0x000fffff, |
2974 | .data = &vendor_arm, |
2975 | }, |
2976 | { |
2977 | .id = 0x00380802, |
2978 | .mask = 0x00ffffff, |
2979 | .data = &vendor_st, |
2980 | }, |
2981 | { 0, 0 }, |
2982 | }; |
2983 | |
2984 | MODULE_DEVICE_TABLE(amba, pl011_ids); |
2985 | |
2986 | static struct amba_driver pl011_driver = { |
2987 | .drv = { |
2988 | .name = "uart-pl011", |
2989 | .pm = &pl011_dev_pm_ops, |
2990 | .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), |
2991 | }, |
2992 | .id_table = pl011_ids, |
2993 | .probe = pl011_probe, |
2994 | .remove = pl011_remove, |
2995 | }; |
2996 | |
2997 | static int __init pl011_init(void) |
2998 | { |
2999 | pr_info("Serial: AMBA PL011 UART driver\n"); |
3000 | |
3001 | if (platform_driver_register(&arm_sbsa_uart_platform_driver)) |
3002 | pr_warn("could not register SBSA UART platform driver\n"); |
3003 | return amba_driver_register(&pl011_driver); |
3004 | } |
3005 | |
3006 | static void __exit pl011_exit(void) |
3007 | { |
3008 | platform_driver_unregister(&arm_sbsa_uart_platform_driver); |
3009 | amba_driver_unregister(&pl011_driver); |
3010 | } |
3011 | |
3012 | /* |
3013 | * While this can be a module, if builtin it's most likely the console, |
3014 | * so let's leave module_exit but move module_init to an earlier place. |
3015 | */ |
3016 | arch_initcall(pl011_init); |
3017 | module_exit(pl011_exit); |
3018 | |
3019 | MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); |
3020 | MODULE_DESCRIPTION("ARM AMBA serial port driver"); |
3021 | MODULE_LICENSE("GPL"); |
3022 |