1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Driver for AMBA serial ports |
4 | * |
5 | * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. |
6 | * |
7 | * Copyright 1999 ARM Limited |
8 | * Copyright (C) 2000 Deep Blue Solutions Ltd. |
9 | * Copyright (C) 2010 ST-Ericsson SA |
10 | * |
11 | * This is a generic driver for ARM AMBA-type serial ports. They |
12 | * have a lot of 16550-like features, but are not register compatible. |
13 | * Note that although they do have CTS, DCD and DSR inputs, they do |
14 | * not have an RI input, nor do they have DTR or RTS outputs. If |
15 | * required, these have to be supplied via some other means (eg, GPIO) |
16 | * and hooked into this driver. |
17 | */ |
18 | |
19 | #include <linux/module.h> |
20 | #include <linux/ioport.h> |
21 | #include <linux/init.h> |
22 | #include <linux/console.h> |
23 | #include <linux/platform_device.h> |
24 | #include <linux/sysrq.h> |
25 | #include <linux/device.h> |
26 | #include <linux/tty.h> |
27 | #include <linux/tty_flip.h> |
28 | #include <linux/serial_core.h> |
29 | #include <linux/serial.h> |
30 | #include <linux/amba/bus.h> |
31 | #include <linux/amba/serial.h> |
32 | #include <linux/clk.h> |
33 | #include <linux/slab.h> |
34 | #include <linux/dmaengine.h> |
35 | #include <linux/dma-mapping.h> |
36 | #include <linux/scatterlist.h> |
37 | #include <linux/delay.h> |
38 | #include <linux/types.h> |
39 | #include <linux/of.h> |
40 | #include <linux/pinctrl/consumer.h> |
41 | #include <linux/sizes.h> |
42 | #include <linux/io.h> |
43 | #include <linux/acpi.h> |
44 | |
45 | #define UART_NR 14 |
46 | |
47 | #define SERIAL_AMBA_MAJOR 204 |
48 | #define SERIAL_AMBA_MINOR 64 |
49 | #define SERIAL_AMBA_NR UART_NR |
50 | |
51 | #define AMBA_ISR_PASS_LIMIT 256 |
52 | |
53 | #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) |
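/*
 * Dummy bit OR'd into every character read from the RX FIFO (see
 * pl011_fifo_to_tty()): it sits above the data and error bits of the
 * data register, so it never collides with them.
 */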
54 | #define UART_DUMMY_DR_RX (1 << 16) |
55 | |
56 | enum { |
57 | REG_DR, |
58 | REG_ST_DMAWM, |
59 | REG_ST_TIMEOUT, |
60 | REG_FR, |
61 | REG_LCRH_RX, |
62 | REG_LCRH_TX, |
63 | REG_IBRD, |
64 | REG_FBRD, |
65 | REG_CR, |
66 | REG_IFLS, |
67 | REG_IMSC, |
68 | REG_RIS, |
69 | REG_MIS, |
70 | REG_ICR, |
71 | REG_DMACR, |
72 | REG_ST_XFCR, |
73 | REG_ST_XON1, |
74 | REG_ST_XON2, |
75 | REG_ST_XOFF1, |
76 | REG_ST_XOFF2, |
77 | REG_ST_ITCR, |
78 | REG_ST_ITIP, |
79 | REG_ST_ABCR, |
80 | REG_ST_ABIMSC, |
81 | |
82 | /* The size of the array - must be last */ |
83 | REG_ARRAY_SIZE, |
84 | }; |
85 | |
86 | static u16 pl011_std_offsets[REG_ARRAY_SIZE] = { |
87 | [REG_DR] = UART01x_DR, |
88 | [REG_FR] = UART01x_FR, |
89 | [REG_LCRH_RX] = UART011_LCRH, |
90 | [REG_LCRH_TX] = UART011_LCRH, |
91 | [REG_IBRD] = UART011_IBRD, |
92 | [REG_FBRD] = UART011_FBRD, |
93 | [REG_CR] = UART011_CR, |
94 | [REG_IFLS] = UART011_IFLS, |
95 | [REG_IMSC] = UART011_IMSC, |
96 | [REG_RIS] = UART011_RIS, |
97 | [REG_MIS] = UART011_MIS, |
98 | [REG_ICR] = UART011_ICR, |
99 | [REG_DMACR] = UART011_DMACR, |
100 | }; |
101 | |
102 | /* There is by now at least one vendor with differing details, so handle it */ |
103 | struct vendor_data { |
104 | const u16 *reg_offset; |
105 | unsigned int ifls; |
106 | unsigned int fr_busy; |
107 | unsigned int fr_dsr; |
108 | unsigned int fr_cts; |
109 | unsigned int fr_ri; |
110 | unsigned int inv_fr; |
111 | bool access_32b; |
112 | bool oversampling; |
113 | bool dma_threshold; |
114 | bool cts_event_workaround; |
115 | bool always_enabled; |
116 | bool fixed_options; |
117 | |
118 | unsigned int (*get_fifosize)(struct amba_device *dev); |
119 | }; |
120 | |
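/* ARM PL011 revisions below r3 have 16-byte FIFOs, r3 and later 32 bytes. */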
121 | static unsigned int get_fifosize_arm(struct amba_device *dev) |
122 | { |
123 | return amba_rev(dev) < 3 ? 16 : 32; |
124 | } |
125 | |
126 | static struct vendor_data vendor_arm = { |
127 | .reg_offset = pl011_std_offsets, |
128 | .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, |
129 | .fr_busy = UART01x_FR_BUSY, |
130 | .fr_dsr = UART01x_FR_DSR, |
131 | .fr_cts = UART01x_FR_CTS, |
132 | .fr_ri = UART011_FR_RI, |
133 | .oversampling = false, |
134 | .dma_threshold = false, |
135 | .cts_event_workaround = false, |
136 | .always_enabled = false, |
137 | .fixed_options = false, |
138 | .get_fifosize = get_fifosize_arm, |
139 | }; |
140 | |
141 | static const struct vendor_data vendor_sbsa = { |
142 | .reg_offset = pl011_std_offsets, |
143 | .fr_busy = UART01x_FR_BUSY, |
144 | .fr_dsr = UART01x_FR_DSR, |
145 | .fr_cts = UART01x_FR_CTS, |
146 | .fr_ri = UART011_FR_RI, |
147 | .access_32b = true, |
148 | .oversampling = false, |
149 | .dma_threshold = false, |
150 | .cts_event_workaround = false, |
151 | .always_enabled = true, |
152 | .fixed_options = true, |
153 | }; |
154 | |
155 | #ifdef CONFIG_ACPI_SPCR_TABLE |
156 | static const struct vendor_data vendor_qdt_qdf2400_e44 = { |
157 | .reg_offset = pl011_std_offsets, |
158 | .fr_busy = UART011_FR_TXFE, |
159 | .fr_dsr = UART01x_FR_DSR, |
160 | .fr_cts = UART01x_FR_CTS, |
161 | .fr_ri = UART011_FR_RI, |
162 | .inv_fr = UART011_FR_TXFE, |
163 | .access_32b = true, |
164 | .oversampling = false, |
165 | .dma_threshold = false, |
166 | .cts_event_workaround = false, |
167 | .always_enabled = true, |
168 | .fixed_options = true, |
169 | }; |
170 | #endif |
171 | |
172 | static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { |
173 | [REG_DR] = UART01x_DR, |
174 | [REG_ST_DMAWM] = ST_UART011_DMAWM, |
175 | [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT, |
176 | [REG_FR] = UART01x_FR, |
177 | [REG_LCRH_RX] = ST_UART011_LCRH_RX, |
178 | [REG_LCRH_TX] = ST_UART011_LCRH_TX, |
179 | [REG_IBRD] = UART011_IBRD, |
180 | [REG_FBRD] = UART011_FBRD, |
181 | [REG_CR] = UART011_CR, |
182 | [REG_IFLS] = UART011_IFLS, |
183 | [REG_IMSC] = UART011_IMSC, |
184 | [REG_RIS] = UART011_RIS, |
185 | [REG_MIS] = UART011_MIS, |
186 | [REG_ICR] = UART011_ICR, |
187 | [REG_DMACR] = UART011_DMACR, |
188 | [REG_ST_XFCR] = ST_UART011_XFCR, |
189 | [REG_ST_XON1] = ST_UART011_XON1, |
190 | [REG_ST_XON2] = ST_UART011_XON2, |
191 | [REG_ST_XOFF1] = ST_UART011_XOFF1, |
192 | [REG_ST_XOFF2] = ST_UART011_XOFF2, |
193 | [REG_ST_ITCR] = ST_UART011_ITCR, |
194 | [REG_ST_ITIP] = ST_UART011_ITIP, |
195 | [REG_ST_ABCR] = ST_UART011_ABCR, |
196 | [REG_ST_ABIMSC] = ST_UART011_ABIMSC, |
197 | }; |
198 | |
199 | static unsigned int get_fifosize_st(struct amba_device *dev) |
200 | { |
201 | return 64; |
202 | } |
203 | |
204 | static struct vendor_data vendor_st = { |
205 | .reg_offset = pl011_st_offsets, |
206 | .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF, |
207 | .fr_busy = UART01x_FR_BUSY, |
208 | .fr_dsr = UART01x_FR_DSR, |
209 | .fr_cts = UART01x_FR_CTS, |
210 | .fr_ri = UART011_FR_RI, |
211 | .oversampling = true, |
212 | .dma_threshold = true, |
213 | .cts_event_workaround = true, |
214 | .always_enabled = false, |
215 | .fixed_options = false, |
216 | .get_fifosize = get_fifosize_st, |
217 | }; |
218 | |
219 | /* Deals with DMA transactions */ |
220 | |
221 | struct pl011_sgbuf { |
222 | struct scatterlist sg; |
223 | char *buf; |
224 | }; |
225 | |
226 | struct pl011_dmarx_data { |
227 | struct dma_chan *chan; |
228 | struct completion complete; |
229 | bool use_buf_b; |
230 | struct pl011_sgbuf sgbuf_a; |
231 | struct pl011_sgbuf sgbuf_b; |
232 | dma_cookie_t cookie; |
233 | bool running; |
234 | struct timer_list timer; |
235 | unsigned int last_residue; |
236 | unsigned long last_jiffies; |
237 | bool auto_poll_rate; |
238 | unsigned int poll_rate; |
239 | unsigned int poll_timeout; |
240 | }; |
241 | |
242 | struct pl011_dmatx_data { |
243 | struct dma_chan *chan; |
244 | struct scatterlist sg; |
245 | char *buf; |
246 | bool queued; |
247 | }; |
248 | |
249 | /* |
250 | * We wrap our port structure around the generic uart_port. |
251 | */ |
252 | struct uart_amba_port { |
253 | struct uart_port port; |
254 | const u16 *reg_offset; |
255 | struct clk *clk; |
256 | const struct vendor_data *vendor; |
257 | unsigned int dmacr; /* dma control reg */ |
258 | unsigned int im; /* interrupt mask */ |
259 | unsigned int old_status; |
260 | unsigned int fifosize; /* vendor-specific */ |
261 | unsigned int fixed_baud; /* vendor-set fixed baud rate */ |
262 | char type[12]; |
263 | bool rs485_tx_started; |
264 | unsigned int rs485_tx_drain_interval; /* usecs */ |
265 | #ifdef CONFIG_DMA_ENGINE |
266 | /* DMA stuff */ |
267 | bool using_tx_dma; |
268 | bool using_rx_dma; |
269 | struct pl011_dmarx_data dmarx; |
270 | struct pl011_dmatx_data dmatx; |
271 | bool dma_probed; |
272 | #endif |
273 | }; |
274 | |
275 | static unsigned int pl011_tx_empty(struct uart_port *port); |
276 | |
277 | static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap, |
278 | unsigned int reg) |
279 | { |
280 | return uap->reg_offset[reg]; |
281 | } |
282 | |
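/*
 * Register accessors: ports set up with a 32-bit iotype (UPIO_MEM32) must
 * be accessed with 32-bit reads and writes; all others use 16-bit accesses.
 */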
283 | static unsigned int pl011_read(const struct uart_amba_port *uap, |
284 | unsigned int reg) |
285 | { |
286 | void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); |
287 | |
288 | return (uap->port.iotype == UPIO_MEM32) ? |
289 | readl_relaxed(addr) : readw_relaxed(addr); |
290 | } |
291 | |
292 | static void pl011_write(unsigned int val, const struct uart_amba_port *uap, |
293 | unsigned int reg) |
294 | { |
295 | void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); |
296 | |
297 | if (uap->port.iotype == UPIO_MEM32) |
298 | writel_relaxed(val, addr); |
299 | else |
300 | writew_relaxed(val, addr); |
301 | } |
302 | |
303 | /* |
304 | * Reads up to 256 characters from the FIFO or until it's empty and |
305 | * inserts them into the TTY layer. Returns the number of characters |
306 | * read from the FIFO. |
307 | */ |
308 | static int pl011_fifo_to_tty(struct uart_amba_port *uap) |
309 | { |
310 | unsigned int ch, fifotaken; |
311 | int sysrq; |
312 | u16 status; |
313 | u8 flag; |
314 | |
315 | for (fifotaken = 0; fifotaken != 256; fifotaken++) { |
316 | status = pl011_read(uap, reg: REG_FR); |
317 | if (status & UART01x_FR_RXFE) |
318 | break; |
319 | |
320 | /* Take chars from the FIFO and update status */ |
321 | ch = pl011_read(uap, reg: REG_DR) | UART_DUMMY_DR_RX; |
322 | flag = TTY_NORMAL; |
323 | uap->port.icount.rx++; |
324 | |
325 | if (unlikely(ch & UART_DR_ERROR)) { |
326 | if (ch & UART011_DR_BE) { |
327 | ch &= ~(UART011_DR_FE | UART011_DR_PE); |
328 | uap->port.icount.brk++; |
329 | if (uart_handle_break(port: &uap->port)) |
330 | continue; |
331 | } else if (ch & UART011_DR_PE) |
332 | uap->port.icount.parity++; |
333 | else if (ch & UART011_DR_FE) |
334 | uap->port.icount.frame++; |
335 | if (ch & UART011_DR_OE) |
336 | uap->port.icount.overrun++; |
337 | |
338 | ch &= uap->port.read_status_mask; |
339 | |
340 | if (ch & UART011_DR_BE) |
341 | flag = TTY_BREAK; |
342 | else if (ch & UART011_DR_PE) |
343 | flag = TTY_PARITY; |
344 | else if (ch & UART011_DR_FE) |
345 | flag = TTY_FRAME; |
346 | } |
347 | |
348 | uart_port_unlock(up: &uap->port); |
349 | sysrq = uart_handle_sysrq_char(port: &uap->port, ch: ch & 255); |
350 | uart_port_lock(up: &uap->port); |
351 | |
352 | if (!sysrq) |
353 | uart_insert_char(port: &uap->port, status: ch, UART011_DR_OE, ch, flag); |
354 | } |
355 | |
356 | return fifotaken; |
357 | } |
358 | |
359 | |
360 | /* |
361 | * All the DMA operation mode stuff goes inside this ifdef. |
362 | * This assumes that you have a generic DMA device interface, |
363 | * no custom DMA interfaces are supported. |
364 | */ |
365 | #ifdef CONFIG_DMA_ENGINE |
366 | |
367 | #define PL011_DMA_BUFFER_SIZE PAGE_SIZE |
368 | |
369 | static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg, |
370 | enum dma_data_direction dir) |
371 | { |
372 | dma_addr_t dma_addr; |
373 | |
374 | sg->buf = dma_alloc_coherent(dev: chan->device->dev, |
375 | PL011_DMA_BUFFER_SIZE, dma_handle: &dma_addr, GFP_KERNEL); |
376 | if (!sg->buf) |
377 | return -ENOMEM; |
378 | |
379 | sg_init_table(&sg->sg, 1); |
380 | sg_set_page(sg: &sg->sg, page: phys_to_page(dma_addr), |
381 | PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr)); |
382 | sg_dma_address(&sg->sg) = dma_addr; |
383 | sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE; |
384 | |
385 | return 0; |
386 | } |
387 | |
388 | static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg, |
389 | enum dma_data_direction dir) |
390 | { |
391 | if (sg->buf) { |
392 | dma_free_coherent(dev: chan->device->dev, |
393 | PL011_DMA_BUFFER_SIZE, cpu_addr: sg->buf, |
394 | sg_dma_address(&sg->sg)); |
395 | } |
396 | } |
397 | |
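/*
 * Look up the TX and (optionally) RX DMA channels, preferring the generic
 * DMA bindings and falling back to the legacy platform-data filter function,
 * then configure the channels and work out the RX polling parameters.
 */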
398 | static void pl011_dma_probe(struct uart_amba_port *uap) |
399 | { |
400 | /* DMA is the sole user of the platform data right now */ |
401 | struct amba_pl011_data *plat = dev_get_platdata(dev: uap->port.dev); |
402 | struct device *dev = uap->port.dev; |
403 | struct dma_slave_config tx_conf = { |
404 | .dst_addr = uap->port.mapbase + |
405 | pl011_reg_to_offset(uap, reg: REG_DR), |
406 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
407 | .direction = DMA_MEM_TO_DEV, |
408 | .dst_maxburst = uap->fifosize >> 1, |
409 | .device_fc = false, |
410 | }; |
411 | struct dma_chan *chan; |
412 | dma_cap_mask_t mask; |
413 | |
414 | uap->dma_probed = true; |
415 | chan = dma_request_chan(dev, name: "tx" ); |
416 | if (IS_ERR(ptr: chan)) { |
417 | if (PTR_ERR(ptr: chan) == -EPROBE_DEFER) { |
418 | uap->dma_probed = false; |
419 | return; |
420 | } |
421 | |
422 | /* We need platform data */ |
423 | if (!plat || !plat->dma_filter) { |
424 | dev_dbg(uap->port.dev, "no DMA platform data\n" ); |
425 | return; |
426 | } |
427 | |
428 | /* Try to acquire a generic DMA engine slave TX channel */ |
429 | dma_cap_zero(mask); |
430 | dma_cap_set(DMA_SLAVE, mask); |
431 | |
432 | chan = dma_request_channel(mask, plat->dma_filter, |
433 | plat->dma_tx_param); |
434 | if (!chan) { |
435 | dev_err(uap->port.dev, "no TX DMA channel!\n" ); |
436 | return; |
437 | } |
438 | } |
439 | |
440 | dmaengine_slave_config(chan, config: &tx_conf); |
441 | uap->dmatx.chan = chan; |
442 | |
443 | dev_info(uap->port.dev, "DMA channel TX %s\n" , |
444 | dma_chan_name(uap->dmatx.chan)); |
445 | |
446 | /* Optionally make use of an RX channel as well */ |
447 | chan = dma_request_slave_channel(dev, name: "rx" ); |
448 | |
449 | if (!chan && plat && plat->dma_rx_param) { |
450 | chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); |
451 | |
452 | if (!chan) { |
453 | dev_err(uap->port.dev, "no RX DMA channel!\n" ); |
454 | return; |
455 | } |
456 | } |
457 | |
458 | if (chan) { |
459 | struct dma_slave_config rx_conf = { |
460 | .src_addr = uap->port.mapbase + |
461 | pl011_reg_to_offset(uap, reg: REG_DR), |
462 | .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
463 | .direction = DMA_DEV_TO_MEM, |
464 | .src_maxburst = uap->fifosize >> 2, |
465 | .device_fc = false, |
466 | }; |
467 | struct dma_slave_caps caps; |
468 | |
/*
 * Some DMA controllers provide information on their capabilities.
 * If the controller does, check for suitable residue processing;
 * otherwise assume all is well.
 */
474 | if (0 == dma_get_slave_caps(chan, caps: &caps)) { |
475 | if (caps.residue_granularity == |
476 | DMA_RESIDUE_GRANULARITY_DESCRIPTOR) { |
477 | dma_release_channel(chan); |
478 | dev_info(uap->port.dev, |
479 | "RX DMA disabled - no residue processing\n" ); |
480 | return; |
481 | } |
482 | } |
483 | dmaengine_slave_config(chan, config: &rx_conf); |
484 | uap->dmarx.chan = chan; |
485 | |
486 | uap->dmarx.auto_poll_rate = false; |
487 | if (plat && plat->dma_rx_poll_enable) { |
488 | /* Set poll rate if specified. */ |
489 | if (plat->dma_rx_poll_rate) { |
490 | uap->dmarx.auto_poll_rate = false; |
491 | uap->dmarx.poll_rate = plat->dma_rx_poll_rate; |
492 | } else { |
/*
 * Default to a 100 ms poll rate if none is
 * specified. This will be adjusted with
 * the baud rate at set_termios.
 */
498 | uap->dmarx.auto_poll_rate = true; |
499 | uap->dmarx.poll_rate = 100; |
500 | } |
/* Default poll_timeout to 3 secs if not specified. */
502 | if (plat->dma_rx_poll_timeout) |
503 | uap->dmarx.poll_timeout = |
504 | plat->dma_rx_poll_timeout; |
505 | else |
506 | uap->dmarx.poll_timeout = 3000; |
507 | } else if (!plat && dev->of_node) { |
508 | uap->dmarx.auto_poll_rate = of_property_read_bool( |
509 | np: dev->of_node, propname: "auto-poll" ); |
510 | if (uap->dmarx.auto_poll_rate) { |
511 | u32 x; |
512 | |
513 | if (0 == of_property_read_u32(np: dev->of_node, |
514 | propname: "poll-rate-ms" , out_value: &x)) |
515 | uap->dmarx.poll_rate = x; |
516 | else |
517 | uap->dmarx.poll_rate = 100; |
518 | if (0 == of_property_read_u32(np: dev->of_node, |
519 | propname: "poll-timeout-ms" , out_value: &x)) |
520 | uap->dmarx.poll_timeout = x; |
521 | else |
522 | uap->dmarx.poll_timeout = 3000; |
523 | } |
524 | } |
525 | dev_info(uap->port.dev, "DMA channel RX %s\n" , |
526 | dma_chan_name(uap->dmarx.chan)); |
527 | } |
528 | } |
529 | |
530 | static void pl011_dma_remove(struct uart_amba_port *uap) |
531 | { |
532 | if (uap->dmatx.chan) |
533 | dma_release_channel(chan: uap->dmatx.chan); |
534 | if (uap->dmarx.chan) |
535 | dma_release_channel(chan: uap->dmarx.chan); |
536 | } |
537 | |
538 | /* Forward declare these for the refill routine */ |
539 | static int pl011_dma_tx_refill(struct uart_amba_port *uap); |
540 | static void pl011_start_tx_pio(struct uart_amba_port *uap); |
541 | |
542 | /* |
543 | * The current DMA TX buffer has been sent. |
544 | * Try to queue up another DMA buffer. |
545 | */ |
546 | static void pl011_dma_tx_callback(void *data) |
547 | { |
548 | struct uart_amba_port *uap = data; |
549 | struct pl011_dmatx_data *dmatx = &uap->dmatx; |
550 | unsigned long flags; |
551 | u16 dmacr; |
552 | |
553 | uart_port_lock_irqsave(up: &uap->port, flags: &flags); |
554 | if (uap->dmatx.queued) |
555 | dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1, |
556 | DMA_TO_DEVICE); |
557 | |
558 | dmacr = uap->dmacr; |
559 | uap->dmacr = dmacr & ~UART011_TXDMAE; |
560 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
561 | |
562 | /* |
563 | * If TX DMA was disabled, it means that we've stopped the DMA for |
564 | * some reason (eg, XOFF received, or we want to send an X-char.) |
565 | * |
566 | * Note: we need to be careful here of a potential race between DMA |
567 | * and the rest of the driver - if the driver disables TX DMA while |
 * a TX buffer is completing, we must update the tx queued status to
569 | * get further refills (hence we check dmacr). |
570 | */ |
571 | if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(port: &uap->port) || |
572 | uart_circ_empty(&uap->port.state->xmit)) { |
573 | uap->dmatx.queued = false; |
574 | uart_port_unlock_irqrestore(up: &uap->port, flags); |
575 | return; |
576 | } |
577 | |
578 | if (pl011_dma_tx_refill(uap) <= 0) |
579 | /* |
580 | * We didn't queue a DMA buffer for some reason, but we |
581 | * have data pending to be sent. Re-enable the TX IRQ. |
582 | */ |
583 | pl011_start_tx_pio(uap); |
584 | |
585 | uart_port_unlock_irqrestore(up: &uap->port, flags); |
586 | } |
587 | |
588 | /* |
589 | * Try to refill the TX DMA buffer. |
590 | * Locking: called with port lock held and IRQs disabled. |
591 | * Returns: |
592 | * 1 if we queued up a TX DMA buffer. |
593 | * 0 if we didn't want to handle this by DMA |
594 | * <0 on error |
595 | */ |
596 | static int pl011_dma_tx_refill(struct uart_amba_port *uap) |
597 | { |
598 | struct pl011_dmatx_data *dmatx = &uap->dmatx; |
599 | struct dma_chan *chan = dmatx->chan; |
600 | struct dma_device *dma_dev = chan->device; |
601 | struct dma_async_tx_descriptor *desc; |
602 | struct circ_buf *xmit = &uap->port.state->xmit; |
603 | unsigned int count; |
604 | |
605 | /* |
606 | * Try to avoid the overhead involved in using DMA if the |
607 | * transaction fits in the first half of the FIFO, by using |
608 | * the standard interrupt handling. This ensures that we |
609 | * issue a uart_write_wakeup() at the appropriate time. |
610 | */ |
611 | count = uart_circ_chars_pending(xmit); |
612 | if (count < (uap->fifosize >> 1)) { |
613 | uap->dmatx.queued = false; |
614 | return 0; |
615 | } |
616 | |
617 | /* |
618 | * Bodge: don't send the last character by DMA, as this |
619 | * will prevent XON from notifying us to restart DMA. |
620 | */ |
621 | count -= 1; |
622 | |
623 | /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */ |
624 | if (count > PL011_DMA_BUFFER_SIZE) |
625 | count = PL011_DMA_BUFFER_SIZE; |
626 | |
627 | if (xmit->tail < xmit->head) |
628 | memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count); |
629 | else { |
630 | size_t first = UART_XMIT_SIZE - xmit->tail; |
631 | size_t second; |
632 | |
633 | if (first > count) |
634 | first = count; |
635 | second = count - first; |
636 | |
637 | memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first); |
638 | if (second) |
639 | memcpy(&dmatx->buf[first], &xmit->buf[0], second); |
640 | } |
641 | |
642 | dmatx->sg.length = count; |
643 | |
644 | if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) { |
645 | uap->dmatx.queued = false; |
646 | dev_dbg(uap->port.dev, "unable to map TX DMA\n" ); |
647 | return -EBUSY; |
648 | } |
649 | |
650 | desc = dmaengine_prep_slave_sg(chan, sgl: &dmatx->sg, sg_len: 1, dir: DMA_MEM_TO_DEV, |
651 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
652 | if (!desc) { |
653 | dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); |
654 | uap->dmatx.queued = false; |
655 | /* |
656 | * If DMA cannot be used right now, we complete this |
657 | * transaction via IRQ and let the TTY layer retry. |
658 | */ |
659 | dev_dbg(uap->port.dev, "TX DMA busy\n" ); |
660 | return -EBUSY; |
661 | } |
662 | |
663 | /* Some data to go along to the callback */ |
664 | desc->callback = pl011_dma_tx_callback; |
665 | desc->callback_param = uap; |
666 | |
667 | /* All errors should happen at prepare time */ |
668 | dmaengine_submit(desc); |
669 | |
670 | /* Fire the DMA transaction */ |
671 | dma_dev->device_issue_pending(chan); |
672 | |
673 | uap->dmacr |= UART011_TXDMAE; |
674 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
675 | uap->dmatx.queued = true; |
676 | |
677 | /* |
678 | * Now we know that DMA will fire, so advance the ring buffer |
679 | * with the stuff we just dispatched. |
680 | */ |
681 | uart_xmit_advance(up: &uap->port, chars: count); |
682 | |
683 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
684 | uart_write_wakeup(port: &uap->port); |
685 | |
686 | return 1; |
687 | } |
688 | |
689 | /* |
690 | * We received a transmit interrupt without a pending X-char but with |
691 | * pending characters. |
692 | * Locking: called with port lock held and IRQs disabled. |
693 | * Returns: |
694 | * false if we want to use PIO to transmit |
695 | * true if we queued a DMA buffer |
696 | */ |
697 | static bool pl011_dma_tx_irq(struct uart_amba_port *uap) |
698 | { |
699 | if (!uap->using_tx_dma) |
700 | return false; |
701 | |
702 | /* |
703 | * If we already have a TX buffer queued, but received a |
704 | * TX interrupt, it will be because we've just sent an X-char. |
705 | * Ensure the TX DMA is enabled and the TX IRQ is disabled. |
706 | */ |
707 | if (uap->dmatx.queued) { |
708 | uap->dmacr |= UART011_TXDMAE; |
709 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
710 | uap->im &= ~UART011_TXIM; |
711 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
712 | return true; |
713 | } |
714 | |
715 | /* |
716 | * We don't have a TX buffer queued, so try to queue one. |
717 | * If we successfully queued a buffer, mask the TX IRQ. |
718 | */ |
719 | if (pl011_dma_tx_refill(uap) > 0) { |
720 | uap->im &= ~UART011_TXIM; |
721 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
722 | return true; |
723 | } |
724 | return false; |
725 | } |
726 | |
727 | /* |
728 | * Stop the DMA transmit (eg, due to received XOFF). |
729 | * Locking: called with port lock held and IRQs disabled. |
730 | */ |
731 | static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) |
732 | { |
733 | if (uap->dmatx.queued) { |
734 | uap->dmacr &= ~UART011_TXDMAE; |
735 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
736 | } |
737 | } |
738 | |
739 | /* |
740 | * Try to start a DMA transmit, or in the case of an XON/OFF |
741 | * character queued for send, try to get that character out ASAP. |
742 | * Locking: called with port lock held and IRQs disabled. |
743 | * Returns: |
744 | * false if we want the TX IRQ to be enabled |
745 | * true if we have a buffer queued |
746 | */ |
747 | static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) |
748 | { |
749 | u16 dmacr; |
750 | |
751 | if (!uap->using_tx_dma) |
752 | return false; |
753 | |
754 | if (!uap->port.x_char) { |
755 | /* no X-char, try to push chars out in DMA mode */ |
756 | bool ret = true; |
757 | |
758 | if (!uap->dmatx.queued) { |
759 | if (pl011_dma_tx_refill(uap) > 0) { |
760 | uap->im &= ~UART011_TXIM; |
761 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
762 | } else |
763 | ret = false; |
764 | } else if (!(uap->dmacr & UART011_TXDMAE)) { |
765 | uap->dmacr |= UART011_TXDMAE; |
766 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
767 | } |
768 | return ret; |
769 | } |
770 | |
771 | /* |
772 | * We have an X-char to send. Disable DMA to prevent it loading |
773 | * the TX fifo, and then see if we can stuff it into the FIFO. |
774 | */ |
775 | dmacr = uap->dmacr; |
776 | uap->dmacr &= ~UART011_TXDMAE; |
777 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
778 | |
779 | if (pl011_read(uap, reg: REG_FR) & UART01x_FR_TXFF) { |
780 | /* |
781 | * No space in the FIFO, so enable the transmit interrupt |
782 | * so we know when there is space. Note that once we've |
783 | * loaded the character, we should just re-enable DMA. |
784 | */ |
785 | return false; |
786 | } |
787 | |
788 | pl011_write(val: uap->port.x_char, uap, reg: REG_DR); |
789 | uap->port.icount.tx++; |
790 | uap->port.x_char = 0; |
791 | |
792 | /* Success - restore the DMA state */ |
793 | uap->dmacr = dmacr; |
794 | pl011_write(val: dmacr, uap, reg: REG_DMACR); |
795 | |
796 | return true; |
797 | } |
798 | |
799 | /* |
800 | * Flush the transmit buffer. |
801 | * Locking: called with port lock held and IRQs disabled. |
802 | */ |
803 | static void pl011_dma_flush_buffer(struct uart_port *port) |
804 | __releases(&uap->port.lock) |
805 | __acquires(&uap->port.lock) |
806 | { |
807 | struct uart_amba_port *uap = |
808 | container_of(port, struct uart_amba_port, port); |
809 | |
810 | if (!uap->using_tx_dma) |
811 | return; |
812 | |
813 | dmaengine_terminate_async(chan: uap->dmatx.chan); |
814 | |
815 | if (uap->dmatx.queued) { |
816 | dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, |
817 | DMA_TO_DEVICE); |
818 | uap->dmatx.queued = false; |
819 | uap->dmacr &= ~UART011_TXDMAE; |
820 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
821 | } |
822 | } |
823 | |
824 | static void pl011_dma_rx_callback(void *data); |
825 | |
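/*
 * Queue an RX transfer into the current DMA buffer (A or B), enable RX DMA
 * in DMACR and mask the RX interrupt.  Returns 0 on success, or a negative
 * error if no channel is available or no descriptor could be prepared, in
 * which case the caller falls back to interrupt mode.
 */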
826 | static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) |
827 | { |
828 | struct dma_chan *rxchan = uap->dmarx.chan; |
829 | struct pl011_dmarx_data *dmarx = &uap->dmarx; |
830 | struct dma_async_tx_descriptor *desc; |
831 | struct pl011_sgbuf *sgbuf; |
832 | |
833 | if (!rxchan) |
834 | return -EIO; |
835 | |
836 | /* Start the RX DMA job */ |
837 | sgbuf = uap->dmarx.use_buf_b ? |
838 | &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; |
839 | desc = dmaengine_prep_slave_sg(chan: rxchan, sgl: &sgbuf->sg, sg_len: 1, |
840 | dir: DMA_DEV_TO_MEM, |
841 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
842 | /* |
843 | * If the DMA engine is busy and cannot prepare a |
844 | * channel, no big deal, the driver will fall back |
845 | * to interrupt mode as a result of this error code. |
846 | */ |
847 | if (!desc) { |
848 | uap->dmarx.running = false; |
849 | dmaengine_terminate_all(chan: rxchan); |
850 | return -EBUSY; |
851 | } |
852 | |
853 | /* Some data to go along to the callback */ |
854 | desc->callback = pl011_dma_rx_callback; |
855 | desc->callback_param = uap; |
856 | dmarx->cookie = dmaengine_submit(desc); |
857 | dma_async_issue_pending(chan: rxchan); |
858 | |
859 | uap->dmacr |= UART011_RXDMAE; |
860 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
861 | uap->dmarx.running = true; |
862 | |
863 | uap->im &= ~UART011_RXIM; |
864 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
865 | |
866 | return 0; |
867 | } |
868 | |
869 | /* |
870 | * This is called when either the DMA job is complete, or |
871 | * the FIFO timeout interrupt occurred. This must be called |
872 | * with the port spinlock uap->port.lock held. |
873 | */ |
874 | static void pl011_dma_rx_chars(struct uart_amba_port *uap, |
875 | u32 pending, bool use_buf_b, |
876 | bool readfifo) |
877 | { |
878 | struct tty_port *port = &uap->port.state->port; |
879 | struct pl011_sgbuf *sgbuf = use_buf_b ? |
880 | &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; |
881 | int dma_count = 0; |
882 | u32 fifotaken = 0; /* only used for vdbg() */ |
883 | |
884 | struct pl011_dmarx_data *dmarx = &uap->dmarx; |
885 | int dmataken = 0; |
886 | |
887 | if (uap->dmarx.poll_rate) { |
888 | /* The data can be taken by polling */ |
889 | dmataken = sgbuf->sg.length - dmarx->last_residue; |
890 | /* Recalculate the pending size */ |
891 | if (pending >= dmataken) |
892 | pending -= dmataken; |
893 | } |
894 | |
/* Pick up the remaining data from the DMA */
896 | if (pending) { |
897 | |
898 | /* |
899 | * First take all chars in the DMA pipe, then look in the FIFO. |
 * Note that tty_insert_flip_string() tries to take as many chars
901 | * as it can. |
902 | */ |
903 | dma_count = tty_insert_flip_string(port, chars: sgbuf->buf + dmataken, |
904 | size: pending); |
905 | |
906 | uap->port.icount.rx += dma_count; |
907 | if (dma_count < pending) |
908 | dev_warn(uap->port.dev, |
909 | "couldn't insert all characters (TTY is full?)\n" ); |
910 | } |
911 | |
912 | /* Reset the last_residue for Rx DMA poll */ |
913 | if (uap->dmarx.poll_rate) |
914 | dmarx->last_residue = sgbuf->sg.length; |
915 | |
916 | /* |
917 | * Only continue with trying to read the FIFO if all DMA chars have |
918 | * been taken first. |
919 | */ |
920 | if (dma_count == pending && readfifo) { |
921 | /* Clear any error flags */ |
922 | pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS | |
923 | UART011_FEIS, uap, reg: REG_ICR); |
924 | |
925 | /* |
926 | * If we read all the DMA'd characters, and we had an |
927 | * incomplete buffer, that could be due to an rx error, or |
928 | * maybe we just timed out. Read any pending chars and check |
929 | * the error status. |
930 | * |
 * Error conditions will only occur in the FIFO; these will
932 | * trigger an immediate interrupt and stop the DMA job, so we |
933 | * will always find the error in the FIFO, never in the DMA |
934 | * buffer. |
935 | */ |
936 | fifotaken = pl011_fifo_to_tty(uap); |
937 | } |
938 | |
939 | dev_vdbg(uap->port.dev, |
940 | "Took %d chars from DMA buffer and %d chars from the FIFO\n" , |
941 | dma_count, fifotaken); |
942 | tty_flip_buffer_push(port); |
943 | } |
944 | |
945 | static void pl011_dma_rx_irq(struct uart_amba_port *uap) |
946 | { |
947 | struct pl011_dmarx_data *dmarx = &uap->dmarx; |
948 | struct dma_chan *rxchan = dmarx->chan; |
949 | struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? |
950 | &dmarx->sgbuf_b : &dmarx->sgbuf_a; |
951 | size_t pending; |
952 | struct dma_tx_state state; |
953 | enum dma_status dmastat; |
954 | |
955 | /* |
956 | * Pause the transfer so we can trust the current counter, |
957 | * do this before we pause the PL011 block, else we may |
958 | * overflow the FIFO. |
959 | */ |
960 | if (dmaengine_pause(chan: rxchan)) |
961 | dev_err(uap->port.dev, "unable to pause DMA transfer\n" ); |
962 | dmastat = rxchan->device->device_tx_status(rxchan, |
963 | dmarx->cookie, &state); |
964 | if (dmastat != DMA_PAUSED) |
965 | dev_err(uap->port.dev, "unable to pause DMA transfer\n" ); |
966 | |
967 | /* Disable RX DMA - incoming data will wait in the FIFO */ |
968 | uap->dmacr &= ~UART011_RXDMAE; |
969 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
970 | uap->dmarx.running = false; |
971 | |
972 | pending = sgbuf->sg.length - state.residue; |
973 | BUG_ON(pending > PL011_DMA_BUFFER_SIZE); |
974 | /* Then we terminate the transfer - we now know our residue */ |
975 | dmaengine_terminate_all(chan: rxchan); |
976 | |
977 | /* |
978 | * This will take the chars we have so far and insert |
979 | * into the framework. |
980 | */ |
981 | pl011_dma_rx_chars(uap, pending, use_buf_b: dmarx->use_buf_b, readfifo: true); |
982 | |
983 | /* Switch buffer & re-trigger DMA job */ |
984 | dmarx->use_buf_b = !dmarx->use_buf_b; |
985 | if (pl011_dma_rx_trigger_dma(uap)) { |
986 | dev_dbg(uap->port.dev, "could not retrigger RX DMA job " |
987 | "fall back to interrupt mode\n" ); |
988 | uap->im |= UART011_RXIM; |
989 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
990 | } |
991 | } |
992 | |
993 | static void pl011_dma_rx_callback(void *data) |
994 | { |
995 | struct uart_amba_port *uap = data; |
996 | struct pl011_dmarx_data *dmarx = &uap->dmarx; |
997 | struct dma_chan *rxchan = dmarx->chan; |
998 | bool lastbuf = dmarx->use_buf_b; |
999 | struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? |
1000 | &dmarx->sgbuf_b : &dmarx->sgbuf_a; |
1001 | size_t pending; |
1002 | struct dma_tx_state state; |
1003 | int ret; |
1004 | |
1005 | /* |
1006 | * This completion interrupt occurs typically when the |
1007 | * RX buffer is totally stuffed but no timeout has yet |
1008 | * occurred. When that happens, we just want the RX |
1009 | * routine to flush out the secondary DMA buffer while |
1010 | * we immediately trigger the next DMA job. |
1011 | */ |
1012 | uart_port_lock_irq(up: &uap->port); |
1013 | /* |
1014 | * Rx data can be taken by the UART interrupts during |
1015 | * the DMA irq handler. So we check the residue here. |
1016 | */ |
1017 | rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); |
1018 | pending = sgbuf->sg.length - state.residue; |
1019 | BUG_ON(pending > PL011_DMA_BUFFER_SIZE); |
1020 | /* Then we terminate the transfer - we now know our residue */ |
1021 | dmaengine_terminate_all(chan: rxchan); |
1022 | |
1023 | uap->dmarx.running = false; |
1024 | dmarx->use_buf_b = !lastbuf; |
1025 | ret = pl011_dma_rx_trigger_dma(uap); |
1026 | |
1027 | pl011_dma_rx_chars(uap, pending, use_buf_b: lastbuf, readfifo: false); |
1028 | uart_port_unlock_irq(up: &uap->port); |
1029 | /* |
1030 | * Do this check after we picked the DMA chars so we don't |
1031 | * get some IRQ immediately from RX. |
1032 | */ |
1033 | if (ret) { |
1034 | dev_dbg(uap->port.dev, "could not retrigger RX DMA job " |
1035 | "fall back to interrupt mode\n" ); |
1036 | uap->im |= UART011_RXIM; |
1037 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
1038 | } |
1039 | } |
1040 | |
1041 | /* |
1042 | * Stop accepting received characters, when we're shutting down or |
1043 | * suspending this port. |
1044 | * Locking: called with port lock held and IRQs disabled. |
1045 | */ |
1046 | static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) |
1047 | { |
1048 | if (!uap->using_rx_dma) |
1049 | return; |
1050 | |
1051 | /* FIXME. Just disable the DMA enable */ |
1052 | uap->dmacr &= ~UART011_RXDMAE; |
1053 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
1054 | } |
1055 | |
1056 | /* |
1057 | * Timer handler for Rx DMA polling. |
 * On every poll it checks the residue in the DMA buffer, transfers data
 * to the TTY, and updates last_residue for the next poll.
1060 | */ |
1061 | static void pl011_dma_rx_poll(struct timer_list *t) |
1062 | { |
1063 | struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer); |
1064 | struct tty_port *port = &uap->port.state->port; |
1065 | struct pl011_dmarx_data *dmarx = &uap->dmarx; |
1066 | struct dma_chan *rxchan = uap->dmarx.chan; |
1067 | unsigned long flags; |
1068 | unsigned int dmataken = 0; |
1069 | unsigned int size = 0; |
1070 | struct pl011_sgbuf *sgbuf; |
1071 | int dma_count; |
1072 | struct dma_tx_state state; |
1073 | |
1074 | sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; |
1075 | rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); |
1076 | if (likely(state.residue < dmarx->last_residue)) { |
1077 | dmataken = sgbuf->sg.length - dmarx->last_residue; |
1078 | size = dmarx->last_residue - state.residue; |
1079 | dma_count = tty_insert_flip_string(port, chars: sgbuf->buf + dmataken, |
1080 | size); |
1081 | if (dma_count == size) |
1082 | dmarx->last_residue = state.residue; |
1083 | dmarx->last_jiffies = jiffies; |
1084 | } |
1085 | tty_flip_buffer_push(port); |
1086 | |
1087 | /* |
1088 | * If no data is received in poll_timeout, the driver will fall back |
1089 | * to interrupt mode. We will retrigger DMA at the first interrupt. |
1090 | */ |
1091 | if (jiffies_to_msecs(j: jiffies - dmarx->last_jiffies) |
1092 | > uap->dmarx.poll_timeout) { |
1093 | |
1094 | uart_port_lock_irqsave(up: &uap->port, flags: &flags); |
1095 | pl011_dma_rx_stop(uap); |
1096 | uap->im |= UART011_RXIM; |
1097 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
1098 | uart_port_unlock_irqrestore(up: &uap->port, flags); |
1099 | |
1100 | uap->dmarx.running = false; |
1101 | dmaengine_terminate_all(chan: rxchan); |
1102 | del_timer(timer: &uap->dmarx.timer); |
1103 | } else { |
1104 | mod_timer(timer: &uap->dmarx.timer, |
1105 | expires: jiffies + msecs_to_jiffies(m: uap->dmarx.poll_rate)); |
1106 | } |
1107 | } |
1108 | |
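/*
 * Set up DMA for a port being started: allocate the TX bounce buffer and
 * both RX DMA buffers, enable DMA-on-error and, when RX DMA is usable,
 * kick off the first RX job (and the poll timer if polling is enabled).
 */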
1109 | static void pl011_dma_startup(struct uart_amba_port *uap) |
1110 | { |
1111 | int ret; |
1112 | |
1113 | if (!uap->dma_probed) |
1114 | pl011_dma_probe(uap); |
1115 | |
1116 | if (!uap->dmatx.chan) |
1117 | return; |
1118 | |
1119 | uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA); |
1120 | if (!uap->dmatx.buf) { |
1121 | dev_err(uap->port.dev, "no memory for DMA TX buffer\n" ); |
1122 | uap->port.fifosize = uap->fifosize; |
1123 | return; |
1124 | } |
1125 | |
1126 | sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE); |
1127 | |
1128 | /* The DMA buffer is now the FIFO the TTY subsystem can use */ |
1129 | uap->port.fifosize = PL011_DMA_BUFFER_SIZE; |
1130 | uap->using_tx_dma = true; |
1131 | |
1132 | if (!uap->dmarx.chan) |
1133 | goto skip_rx; |
1134 | |
1135 | /* Allocate and map DMA RX buffers */ |
1136 | ret = pl011_sgbuf_init(chan: uap->dmarx.chan, sg: &uap->dmarx.sgbuf_a, |
1137 | dir: DMA_FROM_DEVICE); |
1138 | if (ret) { |
1139 | dev_err(uap->port.dev, "failed to init DMA %s: %d\n" , |
1140 | "RX buffer A" , ret); |
1141 | goto skip_rx; |
1142 | } |
1143 | |
1144 | ret = pl011_sgbuf_init(chan: uap->dmarx.chan, sg: &uap->dmarx.sgbuf_b, |
1145 | dir: DMA_FROM_DEVICE); |
1146 | if (ret) { |
1147 | dev_err(uap->port.dev, "failed to init DMA %s: %d\n" , |
1148 | "RX buffer B" , ret); |
1149 | pl011_sgbuf_free(chan: uap->dmarx.chan, sg: &uap->dmarx.sgbuf_a, |
1150 | dir: DMA_FROM_DEVICE); |
1151 | goto skip_rx; |
1152 | } |
1153 | |
1154 | uap->using_rx_dma = true; |
1155 | |
1156 | skip_rx: |
1157 | /* Turn on DMA error (RX/TX will be enabled on demand) */ |
1158 | uap->dmacr |= UART011_DMAONERR; |
1159 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
1160 | |
1161 | /* |
 * ST Micro variants have a specific DMA burst threshold
 * compensation. Set this to 16 bytes, so bursts will only
 * be issued above/below 16 bytes.
1165 | */ |
1166 | if (uap->vendor->dma_threshold) |
1167 | pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, |
1168 | uap, reg: REG_ST_DMAWM); |
1169 | |
1170 | if (uap->using_rx_dma) { |
1171 | if (pl011_dma_rx_trigger_dma(uap)) |
1172 | dev_dbg(uap->port.dev, "could not trigger initial " |
1173 | "RX DMA job, fall back to interrupt mode\n" ); |
1174 | if (uap->dmarx.poll_rate) { |
1175 | timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0); |
1176 | mod_timer(timer: &uap->dmarx.timer, |
1177 | expires: jiffies + |
1178 | msecs_to_jiffies(m: uap->dmarx.poll_rate)); |
1179 | uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; |
1180 | uap->dmarx.last_jiffies = jiffies; |
1181 | } |
1182 | } |
1183 | } |
1184 | |
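/*
 * Tear down DMA when the port is shut down: wait for the transmitter to
 * drain, clear the DMA enables in DMACR and release the TX/RX buffers,
 * terminating any in-flight transfers.
 */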
1185 | static void pl011_dma_shutdown(struct uart_amba_port *uap) |
1186 | { |
1187 | if (!(uap->using_tx_dma || uap->using_rx_dma)) |
1188 | return; |
1189 | |
1190 | /* Disable RX and TX DMA */ |
1191 | while (pl011_read(uap, reg: REG_FR) & uap->vendor->fr_busy) |
1192 | cpu_relax(); |
1193 | |
1194 | uart_port_lock_irq(up: &uap->port); |
1195 | uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); |
1196 | pl011_write(val: uap->dmacr, uap, reg: REG_DMACR); |
1197 | uart_port_unlock_irq(up: &uap->port); |
1198 | |
1199 | if (uap->using_tx_dma) { |
1200 | /* In theory, this should already be done by pl011_dma_flush_buffer */ |
1201 | dmaengine_terminate_all(chan: uap->dmatx.chan); |
1202 | if (uap->dmatx.queued) { |
1203 | dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, |
1204 | DMA_TO_DEVICE); |
1205 | uap->dmatx.queued = false; |
1206 | } |
1207 | |
1208 | kfree(objp: uap->dmatx.buf); |
1209 | uap->using_tx_dma = false; |
1210 | } |
1211 | |
1212 | if (uap->using_rx_dma) { |
1213 | dmaengine_terminate_all(chan: uap->dmarx.chan); |
1214 | /* Clean up the RX DMA */ |
1215 | pl011_sgbuf_free(chan: uap->dmarx.chan, sg: &uap->dmarx.sgbuf_a, dir: DMA_FROM_DEVICE); |
1216 | pl011_sgbuf_free(chan: uap->dmarx.chan, sg: &uap->dmarx.sgbuf_b, dir: DMA_FROM_DEVICE); |
1217 | if (uap->dmarx.poll_rate) |
1218 | del_timer_sync(timer: &uap->dmarx.timer); |
1219 | uap->using_rx_dma = false; |
1220 | } |
1221 | } |
1222 | |
1223 | static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) |
1224 | { |
1225 | return uap->using_rx_dma; |
1226 | } |
1227 | |
1228 | static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) |
1229 | { |
1230 | return uap->using_rx_dma && uap->dmarx.running; |
1231 | } |
1232 | |
1233 | #else |
1234 | /* Blank functions if the DMA engine is not available */ |
1235 | static inline void pl011_dma_remove(struct uart_amba_port *uap) |
1236 | { |
1237 | } |
1238 | |
1239 | static inline void pl011_dma_startup(struct uart_amba_port *uap) |
1240 | { |
1241 | } |
1242 | |
1243 | static inline void pl011_dma_shutdown(struct uart_amba_port *uap) |
1244 | { |
1245 | } |
1246 | |
1247 | static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap) |
1248 | { |
1249 | return false; |
1250 | } |
1251 | |
1252 | static inline void pl011_dma_tx_stop(struct uart_amba_port *uap) |
1253 | { |
1254 | } |
1255 | |
1256 | static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) |
1257 | { |
1258 | return false; |
1259 | } |
1260 | |
1261 | static inline void pl011_dma_rx_irq(struct uart_amba_port *uap) |
1262 | { |
1263 | } |
1264 | |
1265 | static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) |
1266 | { |
1267 | } |
1268 | |
1269 | static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) |
1270 | { |
1271 | return -EIO; |
1272 | } |
1273 | |
1274 | static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) |
1275 | { |
1276 | return false; |
1277 | } |
1278 | |
1279 | static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) |
1280 | { |
1281 | return false; |
1282 | } |
1283 | |
1284 | #define pl011_dma_flush_buffer NULL |
1285 | #endif |
1286 | |
1287 | static void pl011_rs485_tx_stop(struct uart_amba_port *uap) |
1288 | { |
1289 | /* |
 * To be on the safe side, only time out after twice as many iterations
 * as the FIFO size.
1292 | */ |
1293 | const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2; |
1294 | struct uart_port *port = &uap->port; |
1295 | int i = 0; |
1296 | u32 cr; |
1297 | |
1298 | /* Wait until hardware tx queue is empty */ |
1299 | while (!pl011_tx_empty(port)) { |
1300 | if (i > MAX_TX_DRAIN_ITERS) { |
1301 | dev_warn(port->dev, |
1302 | "timeout while draining hardware tx queue\n" ); |
1303 | break; |
1304 | } |
1305 | |
1306 | udelay(uap->rs485_tx_drain_interval); |
1307 | i++; |
1308 | } |
1309 | |
1310 | if (port->rs485.delay_rts_after_send) |
1311 | mdelay(port->rs485.delay_rts_after_send); |
1312 | |
1313 | cr = pl011_read(uap, reg: REG_CR); |
1314 | |
1315 | if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) |
1316 | cr &= ~UART011_CR_RTS; |
1317 | else |
1318 | cr |= UART011_CR_RTS; |
1319 | |
/* Disable the transmitter and re-enable the receiver */
1321 | cr &= ~UART011_CR_TXE; |
1322 | cr |= UART011_CR_RXE; |
1323 | pl011_write(val: cr, uap, reg: REG_CR); |
1324 | |
1325 | uap->rs485_tx_started = false; |
1326 | } |
1327 | |
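/* Mask the TX interrupt, stop TX DMA and finish any RS-485 transmission. */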
1328 | static void pl011_stop_tx(struct uart_port *port) |
1329 | { |
1330 | struct uart_amba_port *uap = |
1331 | container_of(port, struct uart_amba_port, port); |
1332 | |
1333 | uap->im &= ~UART011_TXIM; |
1334 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
1335 | pl011_dma_tx_stop(uap); |
1336 | |
1337 | if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) |
1338 | pl011_rs485_tx_stop(uap); |
1339 | } |
1340 | |
1341 | static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq); |
1342 | |
1343 | /* Start TX with programmed I/O only (no DMA) */ |
1344 | static void pl011_start_tx_pio(struct uart_amba_port *uap) |
1345 | { |
if (pl011_tx_chars(uap, false)) {
uap->im |= UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC);
1349 | } |
1350 | } |
1351 | |
1352 | static void pl011_start_tx(struct uart_port *port) |
1353 | { |
1354 | struct uart_amba_port *uap = |
1355 | container_of(port, struct uart_amba_port, port); |
1356 | |
1357 | if (!pl011_dma_tx_start(uap)) |
1358 | pl011_start_tx_pio(uap); |
1359 | } |
1360 | |
1361 | static void pl011_stop_rx(struct uart_port *port) |
1362 | { |
1363 | struct uart_amba_port *uap = |
1364 | container_of(port, struct uart_amba_port, port); |
1365 | |
1366 | uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM| |
1367 | UART011_PEIM|UART011_BEIM|UART011_OEIM); |
1368 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
1369 | |
1370 | pl011_dma_rx_stop(uap); |
1371 | } |
1372 | |
1373 | static void pl011_throttle_rx(struct uart_port *port) |
1374 | { |
1375 | unsigned long flags; |
1376 | |
uart_port_lock_irqsave(port, &flags);
pl011_stop_rx(port);
uart_port_unlock_irqrestore(port, flags);
1380 | } |
1381 | |
1382 | static void pl011_enable_ms(struct uart_port *port) |
1383 | { |
1384 | struct uart_amba_port *uap = |
1385 | container_of(port, struct uart_amba_port, port); |
1386 | |
1387 | uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM; |
1388 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
1389 | } |
1390 | |
1391 | static void pl011_rx_chars(struct uart_amba_port *uap) |
1392 | __releases(&uap->port.lock) |
1393 | __acquires(&uap->port.lock) |
1394 | { |
1395 | pl011_fifo_to_tty(uap); |
1396 | |
1397 | uart_port_unlock(up: &uap->port); |
1398 | tty_flip_buffer_push(port: &uap->port.state->port); |
1399 | /* |
1400 | * If we were temporarily out of DMA mode for a while, |
1401 | * attempt to switch back to DMA mode again. |
1402 | */ |
1403 | if (pl011_dma_rx_available(uap)) { |
1404 | if (pl011_dma_rx_trigger_dma(uap)) { |
1405 | dev_dbg(uap->port.dev, "could not trigger RX DMA job " |
1406 | "fall back to interrupt mode again\n" ); |
1407 | uap->im |= UART011_RXIM; |
1408 | pl011_write(val: uap->im, uap, reg: REG_IMSC); |
1409 | } else { |
1410 | #ifdef CONFIG_DMA_ENGINE |
1411 | /* Start Rx DMA poll */ |
1412 | if (uap->dmarx.poll_rate) { |
1413 | uap->dmarx.last_jiffies = jiffies; |
1414 | uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; |
1415 | mod_timer(timer: &uap->dmarx.timer, |
1416 | expires: jiffies + |
1417 | msecs_to_jiffies(m: uap->dmarx.poll_rate)); |
1418 | } |
1419 | #endif |
1420 | } |
1421 | } |
1422 | uart_port_lock(up: &uap->port); |
1423 | } |
1424 | |
1425 | static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, |
1426 | bool from_irq) |
1427 | { |
1428 | if (unlikely(!from_irq) && |
1429 | pl011_read(uap, reg: REG_FR) & UART01x_FR_TXFF) |
1430 | return false; /* unable to transmit character */ |
1431 | |
1432 | pl011_write(val: c, uap, reg: REG_DR); |
1433 | uap->port.icount.tx++; |
1434 | |
1435 | return true; |
1436 | } |
1437 | |
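/*
 * Begin an RS-485 transmission: enable the transmitter, optionally mute the
 * receiver for half-duplex, drive RTS according to SER_RS485_RTS_ON_SEND
 * and honour the delay_rts_before_send delay.
 */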
1438 | static void pl011_rs485_tx_start(struct uart_amba_port *uap) |
1439 | { |
1440 | struct uart_port *port = &uap->port; |
1441 | u32 cr; |
1442 | |
1443 | /* Enable transmitter */ |
1444 | cr = pl011_read(uap, reg: REG_CR); |
1445 | cr |= UART011_CR_TXE; |
1446 | |
1447 | /* Disable receiver if half-duplex */ |
1448 | if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) |
1449 | cr &= ~UART011_CR_RXE; |
1450 | |
1451 | if (port->rs485.flags & SER_RS485_RTS_ON_SEND) |
1452 | cr &= ~UART011_CR_RTS; |
1453 | else |
1454 | cr |= UART011_CR_RTS; |
1455 | |
1456 | pl011_write(val: cr, uap, reg: REG_CR); |
1457 | |
1458 | if (port->rs485.delay_rts_before_send) |
1459 | mdelay(port->rs485.delay_rts_before_send); |
1460 | |
1461 | uap->rs485_tx_started = true; |
1462 | } |
1463 | |
1464 | /* Returns true if tx interrupts have to be (kept) enabled */ |
1465 | static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) |
1466 | { |
1467 | struct circ_buf *xmit = &uap->port.state->xmit; |
1468 | int count = uap->fifosize >> 1; |
1469 | |
1470 | if ((uap->port.rs485.flags & SER_RS485_ENABLED) && |
1471 | !uap->rs485_tx_started) |
1472 | pl011_rs485_tx_start(uap); |
1473 | |
1474 | if (uap->port.x_char) { |
1475 | if (!pl011_tx_char(uap, c: uap->port.x_char, from_irq)) |
1476 | return true; |
1477 | uap->port.x_char = 0; |
1478 | --count; |
1479 | } |
1480 | if (uart_circ_empty(xmit) || uart_tx_stopped(port: &uap->port)) { |
1481 | pl011_stop_tx(port: &uap->port); |
1482 | return false; |
1483 | } |
1484 | |
1485 | /* If we are using DMA mode, try to send some characters. */ |
1486 | if (pl011_dma_tx_irq(uap)) |
1487 | return true; |
1488 | |
1489 | do { |
1490 | if (likely(from_irq) && count-- == 0) |
1491 | break; |
1492 | |
1493 | if (!pl011_tx_char(uap, c: xmit->buf[xmit->tail], from_irq)) |
1494 | break; |
1495 | |
1496 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
1497 | } while (!uart_circ_empty(xmit)); |
1498 | |
1499 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
1500 | uart_write_wakeup(port: &uap->port); |
1501 | |
1502 | if (uart_circ_empty(xmit)) { |
1503 | pl011_stop_tx(port: &uap->port); |
1504 | return false; |
1505 | } |
1506 | return true; |
1507 | } |
1508 | |
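/*
 * Compare the modem-status bits in the flag register with the last seen
 * state and forward any DCD/DSR/CTS changes to the serial core.
 */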
1509 | static void pl011_modem_status(struct uart_amba_port *uap) |
1510 | { |
1511 | unsigned int status, delta; |
1512 | |
1513 | status = pl011_read(uap, reg: REG_FR) & UART01x_FR_MODEM_ANY; |
1514 | |
1515 | delta = status ^ uap->old_status; |
1516 | uap->old_status = status; |
1517 | |
1518 | if (!delta) |
1519 | return; |
1520 | |
1521 | if (delta & UART01x_FR_DCD) |
1522 | uart_handle_dcd_change(uport: &uap->port, active: status & UART01x_FR_DCD); |
1523 | |
1524 | if (delta & uap->vendor->fr_dsr) |
1525 | uap->port.icount.dsr++; |
1526 | |
1527 | if (delta & uap->vendor->fr_cts) |
1528 | uart_handle_cts_change(uport: &uap->port, |
1529 | active: status & uap->vendor->fr_cts); |
1530 | |
1531 | wake_up_interruptible(&uap->port.state->port.delta_msr_wait); |
1532 | } |
1533 | |
1534 | static void check_apply_cts_event_workaround(struct uart_amba_port *uap) |
1535 | { |
1536 | if (!uap->vendor->cts_event_workaround) |
1537 | return; |
1538 | |
/* Workaround to make sure that all bits are unlocked. */
pl011_write(0x00, uap, REG_ICR);

/*
 * Workaround: introduce a 26 ns (1 UART clk) delay before W1C;
 * a single APB access incurs a 2 pclk (133.12 MHz) delay,
 * so add 2 dummy reads.
 */
pl011_read(uap, REG_ICR);
pl011_read(uap, REG_ICR);
1549 | } |
1550 | |
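/*
 * Main interrupt handler: loop over the masked interrupt status, servicing
 * receive (PIO or DMA), modem-status and transmit interrupts, and give up
 * after AMBA_ISR_PASS_LIMIT passes so a stuck source cannot hang the CPU.
 */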
1551 | static irqreturn_t pl011_int(int irq, void *dev_id) |
1552 | { |
1553 | struct uart_amba_port *uap = dev_id; |
1554 | unsigned long flags; |
1555 | unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT; |
1556 | int handled = 0; |
1557 | |
1558 | uart_port_lock_irqsave(up: &uap->port, flags: &flags); |
1559 | status = pl011_read(uap, reg: REG_RIS) & uap->im; |
1560 | if (status) { |
1561 | do { |
1562 | check_apply_cts_event_workaround(uap); |
1563 | |
1564 | pl011_write(val: status & ~(UART011_TXIS|UART011_RTIS| |
1565 | UART011_RXIS), |
1566 | uap, reg: REG_ICR); |
1567 | |
1568 | if (status & (UART011_RTIS|UART011_RXIS)) { |
1569 | if (pl011_dma_rx_running(uap)) |
1570 | pl011_dma_rx_irq(uap); |
1571 | else |
1572 | pl011_rx_chars(uap); |
1573 | } |
1574 | if (status & (UART011_DSRMIS|UART011_DCDMIS| |
1575 | UART011_CTSMIS|UART011_RIMIS)) |
1576 | pl011_modem_status(uap); |
1577 | if (status & UART011_TXIS) |
1578 | pl011_tx_chars(uap, from_irq: true); |
1579 | |
1580 | if (pass_counter-- == 0) |
1581 | break; |
1582 | |
1583 | status = pl011_read(uap, reg: REG_RIS) & uap->im; |
1584 | } while (status != 0); |
1585 | handled = 1; |
1586 | } |
1587 | |
1588 | uart_port_unlock_irqrestore(up: &uap->port, flags); |
1589 | |
1590 | return IRQ_RETVAL(handled); |
1591 | } |
1592 | |
1593 | static unsigned int pl011_tx_empty(struct uart_port *port) |
1594 | { |
1595 | struct uart_amba_port *uap = |
1596 | container_of(port, struct uart_amba_port, port); |
1597 | |
/* Allow flag register bits to be inverted to work around errata */
unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;
1600 | |
1601 | return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ? |
1602 | 0 : TIOCSER_TEMT; |
1603 | } |
1604 | |
1605 | static unsigned int pl011_get_mctrl(struct uart_port *port) |
1606 | { |
1607 | struct uart_amba_port *uap = |
1608 | container_of(port, struct uart_amba_port, port); |
1609 | unsigned int result = 0; |
1610 | unsigned int status = pl011_read(uap, reg: REG_FR); |
1611 | |
1612 | #define TIOCMBIT(uartbit, tiocmbit) \ |
1613 | if (status & uartbit) \ |
1614 | result |= tiocmbit |
1615 | |
1616 | TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR); |
1617 | TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR); |
1618 | TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS); |
1619 | TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG); |
1620 | #undef TIOCMBIT |
1621 | return result; |
1622 | } |
1623 | |
1624 | static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl) |
1625 | { |
1626 | struct uart_amba_port *uap = |
1627 | container_of(port, struct uart_amba_port, port); |
1628 | unsigned int cr; |
1629 | |
1630 | cr = pl011_read(uap, reg: REG_CR); |
1631 | |
1632 | #define TIOCMBIT(tiocmbit, uartbit) \ |
1633 | if (mctrl & tiocmbit) \ |
1634 | cr |= uartbit; \ |
1635 | else \ |
1636 | cr &= ~uartbit |
1637 | |
1638 | TIOCMBIT(TIOCM_RTS, UART011_CR_RTS); |
1639 | TIOCMBIT(TIOCM_DTR, UART011_CR_DTR); |
1640 | TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1); |
1641 | TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2); |
1642 | TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE); |
1643 | |
1644 | if (port->status & UPSTAT_AUTORTS) { |
1645 | /* We need to disable auto-RTS if we want to turn RTS off */ |
1646 | TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN); |
1647 | } |
1648 | #undef TIOCMBIT |
1649 | |
1650 | pl011_write(val: cr, uap, reg: REG_CR); |
1651 | } |
1652 | |
1653 | static void pl011_break_ctl(struct uart_port *port, int break_state) |
1654 | { |
1655 | struct uart_amba_port *uap = |
1656 | container_of(port, struct uart_amba_port, port); |
1657 | unsigned long flags; |
1658 | unsigned int lcr_h; |
1659 | |
1660 | uart_port_lock_irqsave(up: &uap->port, flags: &flags); |
1661 | lcr_h = pl011_read(uap, reg: REG_LCRH_TX); |
1662 | if (break_state == -1) |
1663 | lcr_h |= UART01x_LCRH_BRK; |
1664 | else |
1665 | lcr_h &= ~UART01x_LCRH_BRK; |
1666 | pl011_write(val: lcr_h, uap, reg: REG_LCRH_TX); |
1667 | uart_port_unlock_irqrestore(up: &uap->port, flags); |
1668 | } |
1669 | |
1670 | #ifdef CONFIG_CONSOLE_POLL |
1671 | |
1672 | static void pl011_quiesce_irqs(struct uart_port *port) |
1673 | { |
1674 | struct uart_amba_port *uap = |
1675 | container_of(port, struct uart_amba_port, port); |
1676 | |
1677 | pl011_write(val: pl011_read(uap, reg: REG_MIS), uap, reg: REG_ICR); |
1678 | /* |
1679 | * There is no way to clear TXIM as this is "ready to transmit IRQ", so |
1680 | * we simply mask it. start_tx() will unmask it. |
1681 | * |
1682 | * Note we can race with start_tx(), and if the race happens, the |
1683 | * polling user might get another interrupt just after we clear it. |
1684 | * But it should be OK and can happen even w/o the race, e.g. |
1685 | * controller immediately got some new data and raised the IRQ. |
1686 | * |
1687 | * And whoever uses polling routines assumes that it manages the device |
1688 | * (including tx queue), so we're also fine with start_tx()'s caller |
1689 | * side. |
1690 | */ |
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
1693 | } |
1694 | |
1695 | static int pl011_get_poll_char(struct uart_port *port) |
1696 | { |
1697 | struct uart_amba_port *uap = |
1698 | container_of(port, struct uart_amba_port, port); |
1699 | unsigned int status; |
1700 | |
1701 | /* |
1702 | * The caller might need IRQs lowered, e.g. if used with KDB NMI |
1703 | * debugger. |
1704 | */ |
1705 | pl011_quiesce_irqs(port); |
1706 | |
	status = pl011_read(uap, REG_FR);
1708 | if (status & UART01x_FR_RXFE) |
1709 | return NO_POLL_CHAR; |
1710 | |
	return pl011_read(uap, REG_DR);
1712 | } |
1713 | |
1714 | static void pl011_put_poll_char(struct uart_port *port, |
1715 | unsigned char ch) |
1716 | { |
1717 | struct uart_amba_port *uap = |
1718 | container_of(port, struct uart_amba_port, port); |
1719 | |
	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
1724 | } |
1725 | |
1726 | #endif /* CONFIG_CONSOLE_POLL */ |
1727 | |
1728 | static int pl011_hwinit(struct uart_port *port) |
1729 | { |
1730 | struct uart_amba_port *uap = |
1731 | container_of(port, struct uart_amba_port, port); |
1732 | int retval; |
1733 | |
	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);
1736 | |
1737 | /* |
1738 | * Try to enable the clock producer. |
1739 | */ |
	retval = clk_prepare_enable(uap->clk);
1741 | if (retval) |
1742 | return retval; |
1743 | |
	uap->port.uartclk = clk_get_rate(uap->clk);
1745 | |
1746 | /* Clear pending error and receive interrupts */ |
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);
1750 | |
	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
1757 | |
	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
1762 | if (plat->init) |
1763 | plat->init(); |
1764 | } |
1765 | return 0; |
1766 | } |
1767 | |
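/*
 * ST variants provide separate LCRH registers for the RX and TX paths;
 * on the standard ARM PL011 both map to the same offset.
 */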
1768 | static bool pl011_split_lcrh(const struct uart_amba_port *uap) |
1769 | { |
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
1772 | } |
1773 | |
1774 | static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h) |
1775 | { |
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write a read-only register 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
1786 | } |
1787 | } |
1788 | |
1789 | static int pl011_allocate_irq(struct uart_amba_port *uap) |
1790 | { |
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
1794 | } |
1795 | |
/*
 * Enable interrupts, only timeouts when using DMA;
 * if the initial RX DMA job failed, start in interrupt
 * mode as well.
 */
1801 | static void pl011_enable_interrupts(struct uart_amba_port *uap) |
1802 | { |
1803 | unsigned long flags; |
1804 | unsigned int i; |
1805 | |
	uart_port_lock_irqsave(&uap->port, &flags);
1807 | |
1808 | /* Clear out any spuriously appearing RX interrupts */ |
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1810 | |
1811 | /* |
1812 | * RXIS is asserted only when the RX FIFO transitions from below |
1813 | * to above the trigger threshold. If the RX FIFO is already |
1814 | * full to the threshold this can't happen and RXIS will now be |
1815 | * stuck off. Drain the RX FIFO explicitly to fix this: |
1816 | */ |
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
1822 | } |
1823 | |
1824 | uap->im = UART011_RTIM; |
1825 | if (!pl011_dma_rx_running(uap)) |
1826 | uap->im |= UART011_RXIM; |
	pl011_write(uap->im, uap, REG_IMSC);
	uart_port_unlock_irqrestore(&uap->port, flags);
1829 | } |
1830 | |
1831 | static void pl011_unthrottle_rx(struct uart_port *port) |
1832 | { |
1833 | struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); |
1834 | unsigned long flags; |
1835 | |
	uart_port_lock_irqsave(&uap->port, &flags);
1837 | |
1838 | uap->im = UART011_RTIM; |
1839 | if (!pl011_dma_rx_running(uap)) |
1840 | uap->im |= UART011_RXIM; |
1841 | |
	pl011_write(uap->im, uap, REG_IMSC);

	uart_port_unlock_irqrestore(&uap->port, flags);
1845 | } |
1846 | |
1847 | static int pl011_startup(struct uart_port *port) |
1848 | { |
1849 | struct uart_amba_port *uap = |
1850 | container_of(port, struct uart_amba_port, port); |
1851 | unsigned int cr; |
1852 | int retval; |
1853 | |
1854 | retval = pl011_hwinit(port); |
1855 | if (retval) |
1856 | goto clk_dis; |
1857 | |
1858 | retval = pl011_allocate_irq(uap); |
1859 | if (retval) |
1860 | goto clk_dis; |
1861 | |
	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	uart_port_lock_irq(&uap->port);

	cr = pl011_read(uap, REG_CR);
1867 | cr &= UART011_CR_RTS | UART011_CR_DTR; |
1868 | cr |= UART01x_CR_UARTEN | UART011_CR_RXE; |
1869 | |
1870 | if (!(port->rs485.flags & SER_RS485_ENABLED)) |
1871 | cr |= UART011_CR_TXE; |
1872 | |
	pl011_write(cr, uap, REG_CR);

	uart_port_unlock_irq(&uap->port);
1876 | |
	/*
	 * Initialise the old status of the modem signals.
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1881 | |
1882 | /* Startup DMA */ |
1883 | pl011_dma_startup(uap); |
1884 | |
1885 | pl011_enable_interrupts(uap); |
1886 | |
1887 | return 0; |
1888 | |
1889 | clk_dis: |
	clk_disable_unprepare(uap->clk);
1891 | return retval; |
1892 | } |
1893 | |
1894 | static int sbsa_uart_startup(struct uart_port *port) |
1895 | { |
1896 | struct uart_amba_port *uap = |
1897 | container_of(port, struct uart_amba_port, port); |
1898 | int retval; |
1899 | |
1900 | retval = pl011_hwinit(port); |
1901 | if (retval) |
1902 | return retval; |
1903 | |
1904 | retval = pl011_allocate_irq(uap); |
1905 | if (retval) |
1906 | return retval; |
1907 | |
1908 | /* The SBSA UART does not support any modem status lines. */ |
1909 | uap->old_status = 0; |
1910 | |
1911 | pl011_enable_interrupts(uap); |
1912 | |
1913 | return 0; |
1914 | } |
1915 | |
1916 | static void pl011_shutdown_channel(struct uart_amba_port *uap, |
1917 | unsigned int lcrh) |
1918 | { |
1919 | unsigned long val; |
1920 | |
	val = pl011_read(uap, lcrh);
1922 | val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); |
	pl011_write(val, uap, lcrh);
1924 | } |
1925 | |
/*
 * Disable the port. It should not disable RTS and DTR;
 * their state should be preserved so it can be restored
 * during startup().
 */
1931 | static void pl011_disable_uart(struct uart_amba_port *uap) |
1932 | { |
1933 | unsigned int cr; |
1934 | |
1935 | uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); |
	uart_port_lock_irq(&uap->port);
	cr = pl011_read(uap, REG_CR);
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	uart_port_unlock_irq(&uap->port);
1942 | |
	/*
	 * Disable the break condition and the FIFOs.
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
1949 | } |
1950 | |
1951 | static void pl011_disable_interrupts(struct uart_amba_port *uap) |
1952 | { |
	uart_port_lock_irq(&uap->port);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	uart_port_unlock_irq(&uap->port);
1961 | } |
1962 | |
1963 | static void pl011_shutdown(struct uart_port *port) |
1964 | { |
1965 | struct uart_amba_port *uap = |
1966 | container_of(port, struct uart_amba_port, port); |
1967 | |
1968 | pl011_disable_interrupts(uap); |
1969 | |
1970 | pl011_dma_shutdown(uap); |
1971 | |
1972 | if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) |
1973 | pl011_rs485_tx_stop(uap); |
1974 | |
1975 | free_irq(uap->port.irq, uap); |
1976 | |
1977 | pl011_disable_uart(uap); |
1978 | |
1979 | /* |
1980 | * Shut down the clock producer |
1981 | */ |
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);
1985 | |
	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
1990 | if (plat->exit) |
1991 | plat->exit(); |
1992 | } |
1993 | |
1994 | if (uap->port.ops->flush_buffer) |
1995 | uap->port.ops->flush_buffer(port); |
1996 | } |
1997 | |
1998 | static void sbsa_uart_shutdown(struct uart_port *port) |
1999 | { |
2000 | struct uart_amba_port *uap = |
2001 | container_of(port, struct uart_amba_port, port); |
2002 | |
2003 | pl011_disable_interrupts(uap); |
2004 | |
2005 | free_irq(uap->port.irq, uap); |
2006 | |
2007 | if (uap->port.ops->flush_buffer) |
2008 | uap->port.ops->flush_buffer(port); |
2009 | } |
2010 | |
2011 | static void |
2012 | pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios) |
2013 | { |
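	/*
	 * read_status_mask selects which receive error bits are reported to
	 * the tty layer; ignore_status_mask (set up below) selects which
	 * received characters are discarded entirely.
	 */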
2014 | port->read_status_mask = UART011_DR_OE | 255; |
2015 | if (termios->c_iflag & INPCK) |
2016 | port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; |
2017 | if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) |
2018 | port->read_status_mask |= UART011_DR_BE; |
2019 | |
2020 | /* |
2021 | * Characters to ignore |
2022 | */ |
2023 | port->ignore_status_mask = 0; |
2024 | if (termios->c_iflag & IGNPAR) |
2025 | port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE; |
2026 | if (termios->c_iflag & IGNBRK) { |
2027 | port->ignore_status_mask |= UART011_DR_BE; |
2028 | /* |
2029 | * If we're ignoring parity and break indicators, |
2030 | * ignore overruns too (for real raw support). |
2031 | */ |
2032 | if (termios->c_iflag & IGNPAR) |
2033 | port->ignore_status_mask |= UART011_DR_OE; |
2034 | } |
2035 | |
2036 | /* |
2037 | * Ignore all characters if CREAD is not set. |
2038 | */ |
2039 | if ((termios->c_cflag & CREAD) == 0) |
2040 | port->ignore_status_mask |= UART_DUMMY_DR_RX; |
2041 | } |
2042 | |
2043 | static void |
2044 | pl011_set_termios(struct uart_port *port, struct ktermios *termios, |
2045 | const struct ktermios *old) |
2046 | { |
2047 | struct uart_amba_port *uap = |
2048 | container_of(port, struct uart_amba_port, port); |
2049 | unsigned int lcr_h, old_cr; |
2050 | unsigned long flags; |
2051 | unsigned int baud, quot, clkdiv; |
2052 | unsigned int bits; |
2053 | |
2054 | if (uap->vendor->oversampling) |
2055 | clkdiv = 8; |
2056 | else |
2057 | clkdiv = 16; |
2058 | |
2059 | /* |
2060 | * Ask the core to calculate the divisor for us. |
2061 | */ |
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
2064 | #ifdef CONFIG_DMA_ENGINE |
2065 | /* |
2066 | * Adjust RX DMA polling rate with baud rate if not specified. |
2067 | */ |
2068 | if (uap->dmarx.auto_poll_rate) |
2069 | uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); |
2070 | #endif |
2071 | |
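	/*
	 * The PL011 divides uartclk by 16 * baud (or 8 * baud when the ST
	 * oversampling-by-8 mode is in use).  IBRD/FBRD hold that divisor as
	 * a 16.6 fixed-point value, so uartclk * 4 / baud below is the
	 * divisor scaled by 64, ready to be split across the two registers.
	 */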
	if (baud > port->uartclk / 16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
2076 | |
2077 | switch (termios->c_cflag & CSIZE) { |
2078 | case CS5: |
2079 | lcr_h = UART01x_LCRH_WLEN_5; |
2080 | break; |
2081 | case CS6: |
2082 | lcr_h = UART01x_LCRH_WLEN_6; |
2083 | break; |
2084 | case CS7: |
2085 | lcr_h = UART01x_LCRH_WLEN_7; |
2086 | break; |
2087 | default: // CS8 |
2088 | lcr_h = UART01x_LCRH_WLEN_8; |
2089 | break; |
2090 | } |
2091 | if (termios->c_cflag & CSTOPB) |
2092 | lcr_h |= UART01x_LCRH_STP2; |
2093 | if (termios->c_cflag & PARENB) { |
2094 | lcr_h |= UART01x_LCRH_PEN; |
2095 | if (!(termios->c_cflag & PARODD)) |
2096 | lcr_h |= UART01x_LCRH_EPS; |
2097 | if (termios->c_cflag & CMSPAR) |
2098 | lcr_h |= UART011_LCRH_SPS; |
2099 | } |
2100 | if (uap->fifosize > 1) |
2101 | lcr_h |= UART01x_LCRH_FEN; |
2102 | |
	bits = tty_get_frame_size(termios->c_cflag);

	uart_port_lock_irqsave(port, &flags);
2106 | |
2107 | /* |
2108 | * Update the per-port timeout. |
2109 | */ |
	uart_update_timeout(port, termios->c_cflag, baud);
2111 | |
2112 | /* |
2113 | * Calculate the approximated time it takes to transmit one character |
2114 | * with the given baud rate. We use this as the poll interval when we |
2115 | * wait for the tx queue to empty. |
2116 | */ |
2117 | uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); |
2118 | |
2119 | pl011_setup_status_masks(port, termios); |
2120 | |
2121 | if (UART_ENABLE_MS(port, termios->c_cflag)) |
2122 | pl011_enable_ms(port); |
2123 | |
2124 | if (port->rs485.flags & SER_RS485_ENABLED) |
2125 | termios->c_cflag &= ~CRTSCTS; |
2126 | |
	old_cr = pl011_read(uap, REG_CR);
2128 | |
2129 | if (termios->c_cflag & CRTSCTS) { |
2130 | if (old_cr & UART011_CR_RTS) |
2131 | old_cr |= UART011_CR_RTSEN; |
2132 | |
2133 | old_cr |= UART011_CR_CTSEN; |
2134 | port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; |
2135 | } else { |
2136 | old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN); |
2137 | port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); |
2138 | } |
2139 | |
2140 | if (uap->vendor->oversampling) { |
2141 | if (baud > port->uartclk / 16) |
2142 | old_cr |= ST_UART011_CR_OVSFACT; |
2143 | else |
2144 | old_cr &= ~ST_UART011_CR_OVSFACT; |
2145 | } |
2146 | |
2147 | /* |
2148 | * Workaround for the ST Micro oversampling variants to |
2149 | * increase the bitrate slightly, by lowering the divisor, |
2150 | * to avoid delayed sampling of start bit at high speeds, |
2151 | * else we see data corruption. |
2152 | */ |
2153 | if (uap->vendor->oversampling) { |
2154 | if ((baud >= 3000000) && (baud < 3250000) && (quot > 1)) |
2155 | quot -= 1; |
2156 | else if ((baud > 3250000) && (quot > 2)) |
2157 | quot -= 2; |
2158 | } |
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);
2162 | |
2163 | /* |
2164 | * ----------v----------v----------v----------v----- |
2165 | * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER |
2166 | * REG_FBRD & REG_IBRD. |
2167 | * ----------^----------^----------^----------^----- |
2168 | */ |
2169 | pl011_write_lcr_h(uap, lcr_h); |
2170 | |
	/*
	 * Receive was disabled by pl011_disable_uart() during shutdown.
	 * Need to re-enable receive if you need to use a tty_driver
	 * returned by tty_find_polling_driver() after a port shutdown.
	 */
	old_cr |= UART011_CR_RXE;
	pl011_write(old_cr, uap, REG_CR);

	uart_port_unlock_irqrestore(port, flags);
2180 | } |
2181 | |
2182 | static void |
2183 | sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios, |
2184 | const struct ktermios *old) |
2185 | { |
2186 | struct uart_amba_port *uap = |
2187 | container_of(port, struct uart_amba_port, port); |
2188 | unsigned long flags; |
2189 | |
	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
2191 | |
2192 | /* The SBSA UART only supports 8n1 without hardware flow control. */ |
2193 | termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD); |
2194 | termios->c_cflag &= ~(CMSPAR | CRTSCTS); |
2195 | termios->c_cflag |= CS8 | CLOCAL; |
2196 | |
	uart_port_lock_irqsave(port, &flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	uart_port_unlock_irqrestore(port, flags);
2201 | } |
2202 | |
2203 | static const char *pl011_type(struct uart_port *port) |
2204 | { |
2205 | struct uart_amba_port *uap = |
2206 | container_of(port, struct uart_amba_port, port); |
2207 | return uap->port.type == PORT_AMBA ? uap->type : NULL; |
2208 | } |
2209 | |
2210 | /* |
2211 | * Configure/autoconfigure the port. |
2212 | */ |
2213 | static void pl011_config_port(struct uart_port *port, int flags) |
2214 | { |
2215 | if (flags & UART_CONFIG_TYPE) |
2216 | port->type = PORT_AMBA; |
2217 | } |
2218 | |
2219 | /* |
2220 | * verify the new serial_struct (for TIOCSSERIAL). |
2221 | */ |
2222 | static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) |
2223 | { |
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
2226 | ret = -EINVAL; |
2227 | if (ser->irq < 0 || ser->irq >= nr_irqs) |
2228 | ret = -EINVAL; |
2229 | if (ser->baud_base < 9600) |
2230 | ret = -EINVAL; |
2231 | if (port->mapbase != (unsigned long) ser->iomem_base) |
2232 | ret = -EINVAL; |
2233 | return ret; |
2234 | } |
2235 | |
2236 | static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios, |
2237 | struct serial_rs485 *rs485) |
2238 | { |
2239 | struct uart_amba_port *uap = |
2240 | container_of(port, struct uart_amba_port, port); |
2241 | |
2242 | if (port->rs485.flags & SER_RS485_ENABLED) |
2243 | pl011_rs485_tx_stop(uap); |
2244 | |
2245 | /* Make sure auto RTS is disabled */ |
2246 | if (rs485->flags & SER_RS485_ENABLED) { |
		u32 cr = pl011_read(uap, REG_CR);

		cr &= ~UART011_CR_RTSEN;
		pl011_write(cr, uap, REG_CR);
2251 | port->status &= ~UPSTAT_AUTORTS; |
2252 | } |
2253 | |
2254 | return 0; |
2255 | } |
2256 | |
2257 | static const struct uart_ops amba_pl011_pops = { |
2258 | .tx_empty = pl011_tx_empty, |
2259 | .set_mctrl = pl011_set_mctrl, |
2260 | .get_mctrl = pl011_get_mctrl, |
2261 | .stop_tx = pl011_stop_tx, |
2262 | .start_tx = pl011_start_tx, |
2263 | .stop_rx = pl011_stop_rx, |
2264 | .throttle = pl011_throttle_rx, |
2265 | .unthrottle = pl011_unthrottle_rx, |
2266 | .enable_ms = pl011_enable_ms, |
2267 | .break_ctl = pl011_break_ctl, |
2268 | .startup = pl011_startup, |
2269 | .shutdown = pl011_shutdown, |
2270 | .flush_buffer = pl011_dma_flush_buffer, |
2271 | .set_termios = pl011_set_termios, |
2272 | .type = pl011_type, |
2273 | .config_port = pl011_config_port, |
2274 | .verify_port = pl011_verify_port, |
2275 | #ifdef CONFIG_CONSOLE_POLL |
2276 | .poll_init = pl011_hwinit, |
2277 | .poll_get_char = pl011_get_poll_char, |
2278 | .poll_put_char = pl011_put_poll_char, |
2279 | #endif |
2280 | }; |
2281 | |
2282 | static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) |
2283 | { |
2284 | } |
2285 | |
2286 | static unsigned int sbsa_uart_get_mctrl(struct uart_port *port) |
2287 | { |
2288 | return 0; |
2289 | } |
2290 | |
2291 | static const struct uart_ops sbsa_uart_pops = { |
2292 | .tx_empty = pl011_tx_empty, |
2293 | .set_mctrl = sbsa_uart_set_mctrl, |
2294 | .get_mctrl = sbsa_uart_get_mctrl, |
2295 | .stop_tx = pl011_stop_tx, |
2296 | .start_tx = pl011_start_tx, |
2297 | .stop_rx = pl011_stop_rx, |
2298 | .startup = sbsa_uart_startup, |
2299 | .shutdown = sbsa_uart_shutdown, |
2300 | .set_termios = sbsa_uart_set_termios, |
2301 | .type = pl011_type, |
2302 | .config_port = pl011_config_port, |
2303 | .verify_port = pl011_verify_port, |
2304 | #ifdef CONFIG_CONSOLE_POLL |
2305 | .poll_init = pl011_hwinit, |
2306 | .poll_get_char = pl011_get_poll_char, |
2307 | .poll_put_char = pl011_put_poll_char, |
2308 | #endif |
2309 | }; |
2310 | |
2311 | static struct uart_amba_port *amba_ports[UART_NR]; |
2312 | |
2313 | #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE |
2314 | |
2315 | static void pl011_console_putchar(struct uart_port *port, unsigned char ch) |
2316 | { |
2317 | struct uart_amba_port *uap = |
2318 | container_of(port, struct uart_amba_port, port); |
2319 | |
2320 | while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) |
2321 | cpu_relax(); |
2322 | pl011_write(ch, uap, REG_DR); |
2323 | } |
2324 | |
2325 | static void |
2326 | pl011_console_write(struct console *co, const char *s, unsigned int count) |
2327 | { |
2328 | struct uart_amba_port *uap = amba_ports[co->index]; |
2329 | unsigned int old_cr = 0, new_cr; |
2330 | unsigned long flags; |
2331 | int locked = 1; |
2332 | |
2333 | clk_enable(uap->clk); |
2334 | |
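	/*
	 * The port lock may already be held if a sysrq is being handled, and
	 * during an oops we must not deadlock the console path, so only take
	 * the lock when it is safe to do so.
	 */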
2335 | local_irq_save(flags); |
2336 | if (uap->port.sysrq) |
2337 | locked = 0; |
2338 | else if (oops_in_progress) |
2339 | locked = uart_port_trylock(&uap->port); |
2340 | else |
2341 | uart_port_lock(&uap->port); |
2342 | |
2343 | /* |
2344 | * First save the CR then disable the interrupts |
2345 | */ |
2346 | if (!uap->vendor->always_enabled) { |
2347 | old_cr = pl011_read(uap, REG_CR); |
2348 | new_cr = old_cr & ~UART011_CR_CTSEN; |
2349 | new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; |
2350 | pl011_write(new_cr, uap, REG_CR); |
2351 | } |
2352 | |
2353 | uart_console_write(&uap->port, s, count, pl011_console_putchar); |
2354 | |
	/*
	 * Finally, wait for the transmitter to become empty and restore the
	 * CR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
2360 | while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) |
2361 | & uap->vendor->fr_busy) |
2362 | cpu_relax(); |
2363 | if (!uap->vendor->always_enabled) |
2364 | pl011_write(old_cr, uap, REG_CR); |
2365 | |
2366 | if (locked) |
2367 | uart_port_unlock(&uap->port); |
2368 | local_irq_restore(flags); |
2369 | |
2370 | clk_disable(uap->clk); |
2371 | } |
2372 | |
2373 | static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, |
2374 | int *parity, int *bits) |
2375 | { |
2376 | if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { |
2377 | unsigned int lcr_h, ibrd, fbrd; |
2378 | |
2379 | lcr_h = pl011_read(uap, REG_LCRH_TX); |
2380 | |
2381 | *parity = 'n'; |
2382 | if (lcr_h & UART01x_LCRH_PEN) { |
2383 | if (lcr_h & UART01x_LCRH_EPS) |
2384 | *parity = 'e'; |
2385 | else |
2386 | *parity = 'o'; |
2387 | } |
2388 | |
2389 | if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7) |
2390 | *bits = 7; |
2391 | else |
2392 | *bits = 8; |
2393 | |
2394 | ibrd = pl011_read(uap, REG_IBRD); |
2395 | fbrd = pl011_read(uap, REG_FBRD); |
2396 | |
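		/*
		 * Invert the IBRD/FBRD fixed-point divisor:
		 * baud = uartclk / (16 * (ibrd + fbrd / 64)).
		 */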
2397 | *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); |
2398 | |
2399 | if (uap->vendor->oversampling) { |
2400 | if (pl011_read(uap, REG_CR) |
2401 | & ST_UART011_CR_OVSFACT) |
2402 | *baud *= 2; |
2403 | } |
2404 | } |
2405 | } |
2406 | |
2407 | static int pl011_console_setup(struct console *co, char *options) |
2408 | { |
2409 | struct uart_amba_port *uap; |
2410 | int baud = 38400; |
2411 | int bits = 8; |
2412 | int parity = 'n'; |
2413 | int flow = 'n'; |
2414 | int ret; |
2415 | |
2416 | /* |
2417 | * Check whether an invalid uart number has been specified, and |
2418 | * if so, search for the first available port that does have |
2419 | * console support. |
2420 | */ |
2421 | if (co->index >= UART_NR) |
2422 | co->index = 0; |
2423 | uap = amba_ports[co->index]; |
2424 | if (!uap) |
2425 | return -ENODEV; |
2426 | |
2427 | /* Allow pins to be muxed in and configured */ |
2428 | pinctrl_pm_select_default_state(uap->port.dev); |
2429 | |
2430 | ret = clk_prepare(uap->clk); |
2431 | if (ret) |
2432 | return ret; |
2433 | |
2434 | if (dev_get_platdata(uap->port.dev)) { |
2435 | struct amba_pl011_data *plat; |
2436 | |
2437 | plat = dev_get_platdata(uap->port.dev); |
2438 | if (plat->init) |
2439 | plat->init(); |
2440 | } |
2441 | |
2442 | uap->port.uartclk = clk_get_rate(uap->clk); |
2443 | |
2444 | if (uap->vendor->fixed_options) { |
2445 | baud = uap->fixed_baud; |
2446 | } else { |
2447 | if (options) |
2448 | uart_parse_options(options, |
2449 | &baud, &parity, &bits, &flow); |
2450 | else |
2451 | pl011_console_get_options(uap, &baud, &parity, &bits); |
2452 | } |
2453 | |
2454 | return uart_set_options(&uap->port, co, baud, parity, bits, flow); |
2455 | } |
2456 | |
2457 | /** |
2458 | * pl011_console_match - non-standard console matching |
2459 | * @co: registering console |
2460 | * @name: name from console command line |
2461 | * @idx: index from console command line |
2462 | * @options: ptr to option string from console command line |
2463 | * |
2464 | * Only attempts to match console command lines of the form: |
2465 | * console=pl011,mmio|mmio32,<addr>[,<options>] |
2466 | * console=pl011,0x<addr>[,<options>] |
2467 | * This form is used to register an initial earlycon boot console and |
2468 | * replace it with the amba_console at pl011 driver init. |
2469 | * |
2470 | * Performs console setup for a match (as required by interface) |
2471 | * If no <options> are specified, then assume the h/w is already setup. |
2472 | * |
2473 | * Returns 0 if console matches; otherwise non-zero to use default matching |
2474 | */ |
2475 | static int pl011_console_match(struct console *co, char *name, int idx, |
2476 | char *options) |
2477 | { |
2478 | unsigned char iotype; |
2479 | resource_size_t addr; |
2480 | int i; |
2481 | |
2482 | /* |
2483 | * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum |
2484 | * have a distinct console name, so make sure we check for that. |
2485 | * The actual implementation of the erratum occurs in the probe |
2486 | * function. |
2487 | */ |
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
2489 | return -ENODEV; |
2490 | |
2491 | if (uart_parse_earlycon(options, &iotype, &addr, &options)) |
2492 | return -ENODEV; |
2493 | |
2494 | if (iotype != UPIO_MEM && iotype != UPIO_MEM32) |
2495 | return -ENODEV; |
2496 | |
2497 | /* try to match the port specified on the command line */ |
2498 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { |
2499 | struct uart_port *port; |
2500 | |
2501 | if (!amba_ports[i]) |
2502 | continue; |
2503 | |
2504 | port = &amba_ports[i]->port; |
2505 | |
2506 | if (port->mapbase != addr) |
2507 | continue; |
2508 | |
2509 | co->index = i; |
2510 | port->cons = co; |
2511 | return pl011_console_setup(co, options); |
2512 | } |
2513 | |
2514 | return -ENODEV; |
2515 | } |
2516 | |
2517 | static struct uart_driver amba_reg; |
2518 | static struct console amba_console = { |
	.name = "ttyAMA",
2520 | .write = pl011_console_write, |
2521 | .device = uart_console_device, |
2522 | .setup = pl011_console_setup, |
2523 | .match = pl011_console_match, |
2524 | .flags = CON_PRINTBUFFER | CON_ANYTIME, |
2525 | .index = -1, |
2526 | .data = &amba_reg, |
2527 | }; |
2528 | |
2529 | #define AMBA_CONSOLE (&amba_console) |
2530 | |
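/*
 * QDF2400 Erratum 44 workaround: after writing each character, wait for
 * the TX FIFO to drain completely rather than only for FIFO-not-full.
 */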
2531 | static void qdf2400_e44_putc(struct uart_port *port, unsigned char c) |
2532 | { |
2533 | while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) |
2534 | cpu_relax(); |
2535 | writel(c, port->membase + UART01x_DR); |
2536 | while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE)) |
2537 | cpu_relax(); |
2538 | } |
2539 | |
2540 | static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n) |
2541 | { |
2542 | struct earlycon_device *dev = con->data; |
2543 | |
2544 | uart_console_write(&dev->port, s, n, qdf2400_e44_putc); |
2545 | } |
2546 | |
2547 | static void pl011_putc(struct uart_port *port, unsigned char c) |
2548 | { |
2549 | while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) |
2550 | cpu_relax(); |
2551 | if (port->iotype == UPIO_MEM32) |
2552 | writel(c, port->membase + UART01x_DR); |
2553 | else |
2554 | writeb(c, port->membase + UART01x_DR); |
2555 | while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY) |
2556 | cpu_relax(); |
2557 | } |
2558 | |
2559 | static void pl011_early_write(struct console *con, const char *s, unsigned n) |
2560 | { |
2561 | struct earlycon_device *dev = con->data; |
2562 | |
2563 | uart_console_write(&dev->port, s, n, pl011_putc); |
2564 | } |
2565 | |
2566 | #ifdef CONFIG_CONSOLE_POLL |
2567 | static int pl011_getc(struct uart_port *port) |
2568 | { |
2569 | if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE) |
2570 | return NO_POLL_CHAR; |
2571 | |
2572 | if (port->iotype == UPIO_MEM32) |
2573 | return readl(port->membase + UART01x_DR); |
2574 | else |
2575 | return readb(port->membase + UART01x_DR); |
2576 | } |
2577 | |
2578 | static int pl011_early_read(struct console *con, char *s, unsigned int n) |
2579 | { |
2580 | struct earlycon_device *dev = con->data; |
2581 | int ch, num_read = 0; |
2582 | |
2583 | while (num_read < n) { |
2584 | ch = pl011_getc(&dev->port); |
2585 | if (ch == NO_POLL_CHAR) |
2586 | break; |
2587 | |
2588 | s[num_read++] = ch; |
2589 | } |
2590 | |
2591 | return num_read; |
2592 | } |
2593 | #else |
2594 | #define pl011_early_read NULL |
2595 | #endif |
2596 | |
2597 | /* |
2598 | * On non-ACPI systems, earlycon is enabled by specifying |
2599 | * "earlycon=pl011,<address>" on the kernel command line. |
2600 | * |
2601 | * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table, |
2602 | * by specifying only "earlycon" on the command line. Because it requires |
2603 | * SPCR, the console starts after ACPI is parsed, which is later than a |
2604 | * traditional early console. |
2605 | * |
2606 | * To get the traditional early console that starts before ACPI is parsed, |
2607 | * specify the full "earlycon=pl011,<address>" option. |
2608 | */ |
2609 | static int __init pl011_early_console_setup(struct earlycon_device *device, |
2610 | const char *opt) |
2611 | { |
2612 | if (!device->port.membase) |
2613 | return -ENODEV; |
2614 | |
2615 | device->con->write = pl011_early_write; |
2616 | device->con->read = pl011_early_read; |
2617 | |
2618 | return 0; |
2619 | } |
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2622 | |
2623 | /* |
2624 | * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by |
2625 | * Erratum 44, traditional earlycon can be enabled by specifying |
2626 | * "earlycon=qdf2400_e44,<address>". Any options are ignored. |
2627 | * |
2628 | * Alternatively, you can just specify "earlycon", and the early console |
2629 | * will be enabled with the information from the SPCR table. In this |
2630 | * case, the SPCR code will detect the need for the E44 work-around, |
2631 | * and set the console name to "qdf2400_e44". |
2632 | */ |
2633 | static int __init |
2634 | qdf2400_e44_early_console_setup(struct earlycon_device *device, |
2635 | const char *opt) |
2636 | { |
2637 | if (!device->port.membase) |
2638 | return -ENODEV; |
2639 | |
2640 | device->con->write = qdf2400_e44_early_write; |
2641 | return 0; |
2642 | } |
2643 | EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup); |
2644 | |
2645 | #else |
2646 | #define AMBA_CONSOLE NULL |
2647 | #endif |
2648 | |
2649 | static struct uart_driver amba_reg = { |
2650 | .owner = THIS_MODULE, |
	.driver_name = "ttyAMA",
	.dev_name = "ttyAMA",
2653 | .major = SERIAL_AMBA_MAJOR, |
2654 | .minor = SERIAL_AMBA_MINOR, |
2655 | .nr = UART_NR, |
2656 | .cons = AMBA_CONSOLE, |
2657 | }; |
2658 | |
2659 | static int pl011_probe_dt_alias(int index, struct device *dev) |
2660 | { |
2661 | struct device_node *np; |
2662 | static bool seen_dev_with_alias = false; |
2663 | static bool seen_dev_without_alias = false; |
2664 | int ret = index; |
2665 | |
2666 | if (!IS_ENABLED(CONFIG_OF)) |
2667 | return ret; |
2668 | |
2669 | np = dev->of_node; |
2670 | if (!np) |
2671 | return ret; |
2672 | |
	ret = of_alias_get_id(np, "serial");
2674 | if (ret < 0) { |
2675 | seen_dev_without_alias = true; |
2676 | ret = index; |
2677 | } else { |
2678 | seen_dev_with_alias = true; |
2679 | if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) { |
			dev_warn(dev, "requested serial port %d not available.\n", ret);
2681 | ret = index; |
2682 | } |
2683 | } |
2684 | |
2685 | if (seen_dev_with_alias && seen_dev_without_alias) |
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2687 | |
2688 | return ret; |
2689 | } |
2690 | |
2691 | /* unregisters the driver also if no more ports are left */ |
2692 | static void pl011_unregister_port(struct uart_amba_port *uap) |
2693 | { |
2694 | int i; |
2695 | bool busy = false; |
2696 | |
2697 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { |
2698 | if (amba_ports[i] == uap) |
2699 | amba_ports[i] = NULL; |
2700 | else if (amba_ports[i]) |
2701 | busy = true; |
2702 | } |
2703 | pl011_dma_remove(uap); |
2704 | if (!busy) |
		uart_unregister_driver(&amba_reg);
2706 | } |
2707 | |
2708 | static int pl011_find_free_port(void) |
2709 | { |
2710 | int i; |
2711 | |
2712 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) |
2713 | if (amba_ports[i] == NULL) |
2714 | return i; |
2715 | |
2716 | return -EBUSY; |
2717 | } |
2718 | |
2719 | static int pl011_get_rs485_mode(struct uart_amba_port *uap) |
2720 | { |
2721 | struct uart_port *port = &uap->port; |
2722 | int ret; |
2723 | |
2724 | ret = uart_get_rs485_mode(port); |
2725 | if (ret) |
2726 | return ret; |
2727 | |
2728 | return 0; |
2729 | } |
2730 | |
2731 | static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, |
2732 | struct resource *mmiobase, int index) |
2733 | { |
2734 | void __iomem *base; |
2735 | int ret; |
2736 | |
	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);
2740 | |
2741 | index = pl011_probe_dt_alias(index, dev); |
2742 | |
2743 | uap->port.dev = dev; |
2744 | uap->port.mapbase = mmiobase->start; |
2745 | uap->port.membase = base; |
2746 | uap->port.fifosize = uap->fifosize; |
2747 | uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE); |
2748 | uap->port.flags = UPF_BOOT_AUTOCONF; |
2749 | uap->port.line = index; |
2750 | |
2751 | ret = pl011_get_rs485_mode(uap); |
2752 | if (ret) |
2753 | return ret; |
2754 | |
2755 | amba_ports[index] = uap; |
2756 | |
2757 | return 0; |
2758 | } |
2759 | |
2760 | static int pl011_register_port(struct uart_amba_port *uap) |
2761 | { |
2762 | int ret, i; |
2763 | |
2764 | /* Ensure interrupts from this UART are masked and cleared */ |
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);
2767 | |
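	/*
	 * Register the uart_driver lazily, on the first port we see; it is
	 * unregistered again in pl011_unregister_port() once the last port
	 * goes away.
	 */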
2768 | if (!amba_reg.state) { |
		ret = uart_register_driver(&amba_reg);
2770 | if (ret < 0) { |
2771 | dev_err(uap->port.dev, |
				"Failed to register AMBA-PL011 driver\n");
2773 | for (i = 0; i < ARRAY_SIZE(amba_ports); i++) |
2774 | if (amba_ports[i] == uap) |
2775 | amba_ports[i] = NULL; |
2776 | return ret; |
2777 | } |
2778 | } |
2779 | |
	ret = uart_add_one_port(&amba_reg, &uap->port);
2781 | if (ret) |
2782 | pl011_unregister_port(uap); |
2783 | |
2784 | return ret; |
2785 | } |
2786 | |
2787 | static const struct serial_rs485 pl011_rs485_supported = { |
2788 | .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | |
2789 | SER_RS485_RX_DURING_TX, |
2790 | .delay_rts_before_send = 1, |
2791 | .delay_rts_after_send = 1, |
2792 | }; |
2793 | |
2794 | static int pl011_probe(struct amba_device *dev, const struct amba_id *id) |
2795 | { |
2796 | struct uart_amba_port *uap; |
2797 | struct vendor_data *vendor = id->data; |
2798 | int portnr, ret; |
2799 | u32 val; |
2800 | |
2801 | portnr = pl011_find_free_port(); |
2802 | if (portnr < 0) |
2803 | return portnr; |
2804 | |
	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
2807 | if (!uap) |
2808 | return -ENOMEM; |
2809 | |
	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);
2813 | |
2814 | uap->reg_offset = vendor->reg_offset; |
2815 | uap->vendor = vendor; |
2816 | uap->fifosize = vendor->get_fifosize(dev); |
2817 | uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; |
2818 | uap->port.irq = dev->irq[0]; |
2819 | uap->port.ops = &amba_pl011_pops; |
2820 | uap->port.rs485_config = pl011_rs485_config; |
2821 | uap->port.rs485_supported = pl011_rs485_supported; |
	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2823 | |
	if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {
2825 | switch (val) { |
2826 | case 1: |
2827 | uap->port.iotype = UPIO_MEM; |
2828 | break; |
2829 | case 4: |
2830 | uap->port.iotype = UPIO_MEM32; |
2831 | break; |
2832 | default: |
			dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
2834 | val); |
2835 | return -EINVAL; |
2836 | } |
2837 | } |
2838 | |
	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
2840 | if (ret) |
2841 | return ret; |
2842 | |
2843 | amba_set_drvdata(dev, uap); |
2844 | |
2845 | return pl011_register_port(uap); |
2846 | } |
2847 | |
2848 | static void pl011_remove(struct amba_device *dev) |
2849 | { |
2850 | struct uart_amba_port *uap = amba_get_drvdata(dev); |
2851 | |
	uart_remove_one_port(&amba_reg, &uap->port);
2853 | pl011_unregister_port(uap); |
2854 | } |
2855 | |
2856 | #ifdef CONFIG_PM_SLEEP |
2857 | static int pl011_suspend(struct device *dev) |
2858 | { |
2859 | struct uart_amba_port *uap = dev_get_drvdata(dev); |
2860 | |
2861 | if (!uap) |
2862 | return -EINVAL; |
2863 | |
	return uart_suspend_port(&amba_reg, &uap->port);
2865 | } |
2866 | |
2867 | static int pl011_resume(struct device *dev) |
2868 | { |
2869 | struct uart_amba_port *uap = dev_get_drvdata(dev); |
2870 | |
2871 | if (!uap) |
2872 | return -EINVAL; |
2873 | |
	return uart_resume_port(&amba_reg, &uap->port);
2875 | } |
2876 | #endif |
2877 | |
2878 | static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume); |
2879 | |
2880 | static int sbsa_uart_probe(struct platform_device *pdev) |
2881 | { |
2882 | struct uart_amba_port *uap; |
2883 | struct resource *r; |
2884 | int portnr, ret; |
2885 | int baudrate; |
2886 | |
2887 | /* |
2888 | * Check the mandatory baud rate parameter in the DT node early |
2889 | * so that we can easily exit with the error. |
2890 | */ |
2891 | if (pdev->dev.of_node) { |
2892 | struct device_node *np = pdev->dev.of_node; |
2893 | |
		ret = of_property_read_u32(np, "current-speed", &baudrate);
2895 | if (ret) |
2896 | return ret; |
2897 | } else { |
2898 | baudrate = 115200; |
2899 | } |
2900 | |
2901 | portnr = pl011_find_free_port(); |
2902 | if (portnr < 0) |
2903 | return portnr; |
2904 | |
	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
2907 | if (!uap) |
2908 | return -ENOMEM; |
2909 | |
2910 | ret = platform_get_irq(pdev, 0); |
2911 | if (ret < 0) |
2912 | return ret; |
2913 | uap->port.irq = ret; |
2914 | |
2915 | #ifdef CONFIG_ACPI_SPCR_TABLE |
2916 | if (qdf2400_e44_present) { |
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
2918 | uap->vendor = &vendor_qdt_qdf2400_e44; |
2919 | } else |
2920 | #endif |
2921 | uap->vendor = &vendor_sbsa; |
2922 | |
2923 | uap->reg_offset = uap->vendor->reg_offset; |
2924 | uap->fifosize = 32; |
2925 | uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; |
2926 | uap->port.ops = &sbsa_uart_pops; |
2927 | uap->fixed_baud = baudrate; |
2928 | |
	snprintf(uap->type, sizeof(uap->type), "SBSA");
2930 | |
2931 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2932 | |
	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
2934 | if (ret) |
2935 | return ret; |
2936 | |
	platform_set_drvdata(pdev, uap);
2938 | |
2939 | return pl011_register_port(uap); |
2940 | } |
2941 | |
2942 | static int sbsa_uart_remove(struct platform_device *pdev) |
2943 | { |
2944 | struct uart_amba_port *uap = platform_get_drvdata(pdev); |
2945 | |
	uart_remove_one_port(&amba_reg, &uap->port);
2947 | pl011_unregister_port(uap); |
2948 | return 0; |
2949 | } |
2950 | |
2951 | static const struct of_device_id sbsa_uart_of_match[] = { |
	{ .compatible = "arm,sbsa-uart", },
2953 | {}, |
2954 | }; |
2955 | MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); |
2956 | |
2957 | static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = { |
	{ "ARMH0011", 0 },
	{ "ARMHB000", 0 },
2960 | {}, |
2961 | }; |
2962 | MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); |
2963 | |
2964 | static struct platform_driver arm_sbsa_uart_platform_driver = { |
2965 | .probe = sbsa_uart_probe, |
2966 | .remove = sbsa_uart_remove, |
2967 | .driver = { |
		.name = "sbsa-uart",
2969 | .pm = &pl011_dev_pm_ops, |
2970 | .of_match_table = of_match_ptr(sbsa_uart_of_match), |
2971 | .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match), |
2972 | .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), |
2973 | }, |
2974 | }; |
2975 | |
2976 | static const struct amba_id pl011_ids[] = { |
2977 | { |
2978 | .id = 0x00041011, |
2979 | .mask = 0x000fffff, |
2980 | .data = &vendor_arm, |
2981 | }, |
2982 | { |
2983 | .id = 0x00380802, |
2984 | .mask = 0x00ffffff, |
2985 | .data = &vendor_st, |
2986 | }, |
2987 | { 0, 0 }, |
2988 | }; |
2989 | |
2990 | MODULE_DEVICE_TABLE(amba, pl011_ids); |
2991 | |
2992 | static struct amba_driver pl011_driver = { |
2993 | .drv = { |
		.name = "uart-pl011",
2995 | .pm = &pl011_dev_pm_ops, |
2996 | .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), |
2997 | }, |
2998 | .id_table = pl011_ids, |
2999 | .probe = pl011_probe, |
3000 | .remove = pl011_remove, |
3001 | }; |
3002 | |
3003 | static int __init pl011_init(void) |
3004 | { |
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
3006 | |
3007 | if (platform_driver_register(&arm_sbsa_uart_platform_driver)) |
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
3010 | } |
3011 | |
3012 | static void __exit pl011_exit(void) |
3013 | { |
3014 | platform_driver_unregister(&arm_sbsa_uart_platform_driver); |
	amba_driver_unregister(&pl011_driver);
3016 | } |
3017 | |
/*
 * While this can be a module, if built in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
3022 | arch_initcall(pl011_init); |
3023 | module_exit(pl011_exit); |
3024 | |
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");
3028 | |