• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
3  * GT64260, MV64340, MV64360, GT96100, ... ).
4  *
5  * Author: Mark A. Greer <mgreer@mvista.com>
6  *
7  * Based on an old MPSC driver that was in the linuxppc tree.  It appears to
8  * have been created by Chris Zankel (formerly of MontaVista) but there
9  * is no proper Copyright so I'm not sure.  Apparently, parts were also
10  * taken from PPCBoot (now U-Boot).  Also based on drivers/serial/8250.c
11  * by Russell King.
12  *
13  * 2004 (c) MontaVista, Software, Inc.  This file is licensed under
14  * the terms of the GNU General Public License version 2.  This program
15  * is licensed "as is" without any warranty of any kind, whether express
16  * or implied.
17  */
18 /*
19  * The MPSC interface is much like a typical network controller's interface.
20  * That is, you set up separate rings of descriptors for transmitting and
21  * receiving data.  There is also a pool of buffers with (one buffer per
22  * descriptor) that incoming data are dma'd into or outgoing data are dma'd
23  * out of.
24  *
25  * The MPSC requires two other controllers to be able to work.  The Baud Rate
26  * Generator (BRG) provides a clock at programmable frequencies which determines
27  * the baud rate.  The Serial DMA Controller (SDMA) takes incoming data from the
28  * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
29  * MPSC.  It is actually the SDMA interrupt that the driver uses to keep the
30  * transmit and receive "engines" going (i.e., indicate data has been
31  * transmitted or received).
32  *
33  * NOTES:
34  *
35  * 1) Some chips have an erratum where several regs cannot be
36  * read.  To work around that, we keep a local copy of those regs in
37  * 'mpsc_port_info'.
38  *
39  * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
40  * accesses system mem with coherency enabled.  For that reason, the driver
41  * assumes that coherency for that ctlr has been disabled.  This means
42  * that when in a cache coherent system, the driver has to manually manage
43  * the data cache on the areas that it touches because the dma_* macros are
44  * basically no-ops.
45  *
46  * 3) There is an erratum (on PPC) where you can't use the instruction to do
47  * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
48  * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
49  *
50  * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
51  */
52 
53 
54 #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
55 #define SUPPORT_SYSRQ
56 #endif
57 
58 #include <linux/tty.h>
59 #include <linux/tty_flip.h>
60 #include <linux/ioport.h>
61 #include <linux/init.h>
62 #include <linux/console.h>
63 #include <linux/sysrq.h>
64 #include <linux/serial.h>
65 #include <linux/serial_core.h>
66 #include <linux/delay.h>
67 #include <linux/device.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/mv643xx.h>
70 #include <linux/platform_device.h>
71 #include <linux/gfp.h>
72 
73 #include <asm/io.h>
74 #include <asm/irq.h>
75 
76 #define	MPSC_NUM_CTLRS		2
77 
78 /*
79  * Descriptors and buffers must be cache line aligned.
80  * Buffers lengths must be multiple of cache line size.
81  * Number of Tx & Rx descriptors must be powers of 2.
82  */
83 #define	MPSC_RXR_ENTRIES	32
84 #define	MPSC_RXRE_SIZE		dma_get_cache_alignment()
85 #define	MPSC_RXR_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
86 #define	MPSC_RXBE_SIZE		dma_get_cache_alignment()
87 #define	MPSC_RXB_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
88 
89 #define	MPSC_TXR_ENTRIES	32
90 #define	MPSC_TXRE_SIZE		dma_get_cache_alignment()
91 #define	MPSC_TXR_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
92 #define	MPSC_TXBE_SIZE		dma_get_cache_alignment()
93 #define	MPSC_TXB_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
94 
95 #define	MPSC_DMA_ALLOC_SIZE	(MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
96 		+ MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
97 
98 /* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
/* SDMA Rx ring entry; fields are stored big-endian (see cpu_to_be* use) */
99 struct mpsc_rx_desc {
100 	u16 bufsize;	/* Size of the data buffer this desc owns */
101 	u16 bytecnt;	/* Bytes the SDMA actually received */
102 	u32 cmdstat;	/* SDMA_DESC_CMDSTAT_* ownership/status bits */
103 	u32 link;	/* Phys addr of next desc (last wraps to first) */
104 	u32 buf_ptr;	/* Phys addr of this desc's data buffer */
105 } __attribute((packed));
106 
/* SDMA Tx ring entry; fields are stored big-endian (see cpu_to_be* use) */
107 struct mpsc_tx_desc {
108 	u16 bytecnt;	/* Bytes to transmit from the buffer */
109 	u16 shadow;	/* NOTE(review): shadow of bytecnt? not used in this chunk -- confirm */
110 	u32 cmdstat;	/* SDMA_DESC_CMDSTAT_* ownership/status bits */
111 	u32 link;	/* Phys addr of next desc (last wraps to first) */
112 	u32 buf_ptr;	/* Phys addr of this desc's data buffer */
113 } __attribute((packed));
114 
115 /*
116  * Some regs that have the erratum that you can't read them are shared
117  * between the two MPSC controllers.  This struct contains those shared regs.
118  */
119 struct mpsc_shared_regs {
120 	phys_addr_t mpsc_routing_base_p;	/* Phys addr of MPSC routing regs */
121 	phys_addr_t sdma_intr_base_p;	/* Phys addr of SDMA intr regs */
122 
123 	void __iomem *mpsc_routing_base;	/* Mapped MPSC routing regs */
124 	void __iomem *sdma_intr_base;	/* Mapped SDMA intr regs */
125 
	/* Mirrored values of the unreadable regs (used when 'mirror_regs' set) */
126 	u32 MPSC_MRR_m;
127 	u32 MPSC_RCRR_m;
128 	u32 MPSC_TCRR_m;
129 	u32 SDMA_INTR_CAUSE_m;
130 	u32 SDMA_INTR_MASK_m;
131 };
132 
133 /* The main driver data structure */
134 struct mpsc_port_info {
135 	struct uart_port port;	/* Overlay uart_port structure */
136 
137 	/* Internal driver state for this ctlr */
138 	u8 ready;		/* Hw inited & rings allocated (mpsc_make_ready) */
139 	u8 rcv_data;	/* NOTE(review): receive-enable flag? not set in this chunk -- confirm */
140 
141 	/* Info passed in from platform */
142 	u8 mirror_regs;		/* Need to mirror regs? */
143 	u8 cache_mgmt;		/* Need manual cache mgmt? */
144 	u8 brg_can_tune;	/* BRG has baud tuning? */
145 	u32 brg_clk_src;	/* BRG input clock select (BCR bits 21:18) */
146 	u16 mpsc_max_idle;	/* Written to MPSC_CHR_3 in mpsc_hw_init */
147 	int default_baud;
148 	int default_bits;
149 	int default_parity;
150 	int default_flow;
151 
152 	/* Physical addresses of various blocks of registers (from platform) */
153 	phys_addr_t mpsc_base_p;
154 	phys_addr_t sdma_base_p;
155 	phys_addr_t brg_base_p;
156 
157 	/* Virtual addresses of various blocks of registers (from platform) */
158 	void __iomem *mpsc_base;
159 	void __iomem *sdma_base;
160 	void __iomem *brg_base;
161 
	/* Descriptor ring and buffer allocations; one noncoherent region is
	 * carved into rx ring, rx bufs, tx ring, tx bufs (mpsc_init_rings) */
163 	void *dma_region;
164 	dma_addr_t dma_region_p;
165 
166 	dma_addr_t rxr;		/* Rx descriptor ring (CPU-addressable base) */
167 	dma_addr_t rxr_p;	/* Phys addr of rxr */
168 	u8 *rxb;		/* Rx Ring I/O buf */
169 	u8 *rxb_p;		/* Phys addr of rxb */
170 	u32 rxr_posn;		/* First desc w/ Rx data */
171 
172 	dma_addr_t txr;		/* Tx descriptor ring (CPU-addressable base) */
173 	dma_addr_t txr_p;	/* Phys addr of txr */
174 	u8 *txb;		/* Tx Ring I/O buf */
175 	u8 *txb_p;		/* Phys addr of txb */
176 	int txr_head;		/* Where new data goes */
177 	int txr_tail;		/* Where sent data comes off */
178 	spinlock_t tx_lock;	/* transmit lock */
179 
180 	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
181 	u32 MPSC_MPCR_m;
182 	u32 MPSC_CHR_1_m;
183 	u32 MPSC_CHR_2_m;
184 	u32 MPSC_CHR_10_m;
185 	u32 BRG_BCR_m;
186 	struct mpsc_shared_regs *shared_regs;
187 };
188 
189 /* Hooks to platform-specific code */
190 int mpsc_platform_register_driver(void);
191 void mpsc_platform_unregister_driver(void);
192 
193 /* Hooks back in to mpsc common to be called by platform-specific code */
194 struct mpsc_port_info *mpsc_device_probe(int index);
195 struct mpsc_port_info *mpsc_device_remove(int index);
196 
197 /* Main MPSC Configuration Register Offsets */
198 #define	MPSC_MMCRL			0x0000
199 #define	MPSC_MMCRH			0x0004
200 #define	MPSC_MPCR			0x0008
201 #define	MPSC_CHR_1			0x000c
202 #define	MPSC_CHR_2			0x0010
203 #define	MPSC_CHR_3			0x0014
204 #define	MPSC_CHR_4			0x0018
205 #define	MPSC_CHR_5			0x001c
206 #define	MPSC_CHR_6			0x0020
207 #define	MPSC_CHR_7			0x0024
208 #define	MPSC_CHR_8			0x0028
209 #define	MPSC_CHR_9			0x002c
210 #define	MPSC_CHR_10			0x0030
211 #define	MPSC_CHR_11			0x0034
212 
213 #define	MPSC_MPCR_FRZ			(1 << 9)
214 #define	MPSC_MPCR_CL_5			0
215 #define	MPSC_MPCR_CL_6			1
216 #define	MPSC_MPCR_CL_7			2
217 #define	MPSC_MPCR_CL_8			3
218 #define	MPSC_MPCR_SBL_1			0
219 #define	MPSC_MPCR_SBL_2			1
220 
221 #define	MPSC_CHR_2_TEV			(1<<1)
222 #define	MPSC_CHR_2_TA			(1<<7)
223 #define	MPSC_CHR_2_TTCS			(1<<9)
224 #define	MPSC_CHR_2_REV			(1<<17)
225 #define	MPSC_CHR_2_RA			(1<<23)
226 #define	MPSC_CHR_2_CRD			(1<<25)
227 #define	MPSC_CHR_2_EH			(1<<31)
228 #define	MPSC_CHR_2_PAR_ODD		0
229 #define	MPSC_CHR_2_PAR_SPACE		1
230 #define	MPSC_CHR_2_PAR_EVEN		2
231 #define	MPSC_CHR_2_PAR_MARK		3
232 
233 /* MPSC Signal Routing */
234 #define	MPSC_MRR			0x0000
235 #define	MPSC_RCRR			0x0004
236 #define	MPSC_TCRR			0x0008
237 
238 /* Serial DMA Controller Interface Registers */
239 #define	SDMA_SDC			0x0000
240 #define	SDMA_SDCM			0x0008
241 #define	SDMA_RX_DESC			0x0800
242 #define	SDMA_RX_BUF_PTR			0x0808
243 #define	SDMA_SCRDP			0x0810
244 #define	SDMA_TX_DESC			0x0c00
245 #define	SDMA_SCTDP			0x0c10
246 #define	SDMA_SFTDP			0x0c14
247 
248 #define	SDMA_DESC_CMDSTAT_PE		(1<<0)
249 #define	SDMA_DESC_CMDSTAT_CDL		(1<<1)
250 #define	SDMA_DESC_CMDSTAT_FR		(1<<3)
251 #define	SDMA_DESC_CMDSTAT_OR		(1<<6)
252 #define	SDMA_DESC_CMDSTAT_BR		(1<<9)
253 #define	SDMA_DESC_CMDSTAT_MI		(1<<10)
254 #define	SDMA_DESC_CMDSTAT_A		(1<<11)
255 #define	SDMA_DESC_CMDSTAT_AM		(1<<12)
256 #define	SDMA_DESC_CMDSTAT_CT		(1<<13)
257 #define	SDMA_DESC_CMDSTAT_C		(1<<14)
258 #define	SDMA_DESC_CMDSTAT_ES		(1<<15)
259 #define	SDMA_DESC_CMDSTAT_L		(1<<16)
260 #define	SDMA_DESC_CMDSTAT_F		(1<<17)
261 #define	SDMA_DESC_CMDSTAT_P		(1<<18)
262 #define	SDMA_DESC_CMDSTAT_EI		(1<<23)
263 #define	SDMA_DESC_CMDSTAT_O		(1<<31)
264 
265 #define SDMA_DESC_DFLT			(SDMA_DESC_CMDSTAT_O \
266 		| SDMA_DESC_CMDSTAT_EI)
267 
268 #define	SDMA_SDC_RFT			(1<<0)
269 #define	SDMA_SDC_SFM			(1<<1)
270 #define	SDMA_SDC_BLMR			(1<<6)
271 #define	SDMA_SDC_BLMT			(1<<7)
272 #define	SDMA_SDC_POVR			(1<<8)
273 #define	SDMA_SDC_RIFB			(1<<9)
274 
275 #define	SDMA_SDCM_ERD			(1<<7)
276 #define	SDMA_SDCM_AR			(1<<15)
277 #define	SDMA_SDCM_STD			(1<<16)
278 #define	SDMA_SDCM_TXD			(1<<23)
279 #define	SDMA_SDCM_AT			(1<<31)
280 
281 #define	SDMA_0_CAUSE_RXBUF		(1<<0)
282 #define	SDMA_0_CAUSE_RXERR		(1<<1)
283 #define	SDMA_0_CAUSE_TXBUF		(1<<2)
284 #define	SDMA_0_CAUSE_TXEND		(1<<3)
285 #define	SDMA_1_CAUSE_RXBUF		(1<<8)
286 #define	SDMA_1_CAUSE_RXERR		(1<<9)
287 #define	SDMA_1_CAUSE_TXBUF		(1<<10)
288 #define	SDMA_1_CAUSE_TXEND		(1<<11)
289 
290 #define	SDMA_CAUSE_RX_MASK	(SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
291 		| SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
292 #define	SDMA_CAUSE_TX_MASK	(SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
293 		| SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
294 
295 /* SDMA Interrupt registers */
296 #define	SDMA_INTR_CAUSE			0x0000
297 #define	SDMA_INTR_MASK			0x0080
298 
299 /* Baud Rate Generator Interface Registers */
300 #define	BRG_BCR				0x0000
301 #define	BRG_BTR				0x0004
302 
303 /*
304  * Define how this driver is known to the outside (we've been assigned a
305  * range on the "Low-density serial ports" major).
306  */
307 #define MPSC_MAJOR			204
308 #define MPSC_MINOR_START		44
309 #define	MPSC_DRIVER_NAME		"MPSC"
310 #define	MPSC_DEV_NAME			"ttyMM"
311 #define	MPSC_VERSION			"1.00"
312 
313 static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
314 static struct mpsc_shared_regs mpsc_shared_regs;
315 static struct uart_driver mpsc_reg;
316 
317 static void mpsc_start_rx(struct mpsc_port_info *pi);
318 static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
319 static void mpsc_release_port(struct uart_port *port);
320 /*
321  ******************************************************************************
322  *
323  * Baud Rate Generator Routines (BRG)
324  *
325  ******************************************************************************
326  */
/*
 * Select the BRG input clock ('clk_src' -> BCR bits 21:18), clear the
 * baud-tuning bit when tuning is available, and zero the low 16 bits of
 * BTR.  Uses the mirrored BCR value when the reg can't be read back.
 */
mpsc_brg_init(struct mpsc_port_info * pi,u32 clk_src)327 static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
328 {
329 	u32	v;
330 
331 	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
332 	v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
333 
334 	if (pi->brg_can_tune)
335 		v &= ~(1 << 25);
336 
337 	if (pi->mirror_regs)
338 		pi->BRG_BCR_m = v;	/* Keep mirror in sync with hw */
339 	writel(v, pi->brg_base + BRG_BCR);
340 
	/* Clear the low half of BTR (presumably the tuning value -- confirm) */
341 	writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
342 		pi->brg_base + BRG_BTR);
343 }
344 
mpsc_brg_enable(struct mpsc_port_info * pi)345 static void mpsc_brg_enable(struct mpsc_port_info *pi)
346 {
347 	u32	v;
348 
349 	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
350 	v |= (1 << 16);
351 
352 	if (pi->mirror_regs)
353 		pi->BRG_BCR_m = v;
354 	writel(v, pi->brg_base + BRG_BCR);
355 }
356 
mpsc_brg_disable(struct mpsc_port_info * pi)357 static void mpsc_brg_disable(struct mpsc_port_info *pi)
358 {
359 	u32	v;
360 
361 	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
362 	v &= ~(1 << 16);
363 
364 	if (pi->mirror_regs)
365 		pi->BRG_BCR_m = v;
366 	writel(v, pi->brg_base + BRG_BCR);
367 }
368 
369 /*
370  * To set the baud, we adjust the CDV field in the BRG_BCR reg.
371  * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
372  * However, the input clock is divided by 16 in the MPSC b/c of how
373  * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
374  * calculation by 16 to account for that.  So the real calculation
375  * that accounts for the way the mpsc is set up is:
376  * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
377  */
/* Program the baud rate: stop the BRG, write CDV (BCR 15:0), restart. */
mpsc_set_baudrate(struct mpsc_port_info * pi,u32 baud)378 static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
379 {
	/* CDV = (clk / (baud*2*16)) - 1; see derivation in comment above */
380 	u32	cdv = (pi->port.uartclk / (baud << 5)) - 1;
381 	u32	v;
382 
383 	mpsc_brg_disable(pi);
384 	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
385 	v = (v & 0xffff0000) | (cdv & 0xffff);
386 
387 	if (pi->mirror_regs)
388 		pi->BRG_BCR_m = v;	/* Keep mirror in sync with hw */
389 	writel(v, pi->brg_base + BRG_BCR);
390 	mpsc_brg_enable(pi);
391 }
392 
393 /*
394  ******************************************************************************
395  *
396  * Serial DMA Routines (SDMA)
397  *
398  ******************************************************************************
399  */
400 
/*
 * Program the SDMA DMA burst size (SDMA_SDC bits 13:12).
 * 'burst_size' is in bytes; the register encodes 64-bit-word counts.
 */
static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
{
	u32	v;

	pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
			pi->port.line, burst_size);

	burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */

	if (burst_size < 2)
		v = 0x0;	/* 1 64-bit word */
	else if (burst_size < 4)
		v = 0x1;	/* 2 64-bit words */
	else if (burst_size < 8)
		v = 0x2;	/* 4 64-bit words */
	else
		v = 0x3;	/* 8 64-bit words */

	/*
	 * Read-modify-write: clear only the burst-size field before
	 * inserting the new value.  The previous mask (0x3 << 12) kept
	 * just the field being replaced and wiped every other SDMA_SDC
	 * bit, discarding the settings written by mpsc_sdma_init().
	 */
	writel((readl(pi->sdma_base + SDMA_SDC) & ~(0x3 << 12)) | (v << 12),
		pi->sdma_base + SDMA_SDC);
}
422 
/* Basic SDMA channel setup: program SDMA_SDC then the DMA burst size. */
mpsc_sdma_init(struct mpsc_port_info * pi,u32 burst_size)423 static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
424 {
425 	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
426 		burst_size);
427 
	/* Keep bits 9:0, force bits 5:0 on (see SDMA_SDC_* flag defines) */
428 	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
429 		pi->sdma_base + SDMA_SDC);
430 	mpsc_sdma_burstsize(pi, burst_size);
431 }
432 
/*
 * Disable (mask) the given SDMA interrupt sources for this port.
 * Bits set in the hw mask reg mean "enabled"; this clears them.
 * Returns the port's previous 4-bit enable state so it can be restored.
 */
mpsc_sdma_intr_mask(struct mpsc_port_info * pi,u32 mask)433 static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
434 {
435 	u32	old, v;
436 
437 	pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
438 
439 	old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
440 		readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
441 
442 	mask &= 0xf;
443 	if (pi->port.line)
444 		mask <<= 8;	/* Ctlr 1's bits sit 8 above ctlr 0's */
445 	v &= ~mask;
446 
447 	if (pi->mirror_regs)
448 		pi->shared_regs->SDMA_INTR_MASK_m = v;
449 	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
450 
	/* Return only this port's pre-mask enable bits */
451 	if (pi->port.line)
452 		old >>= 8;
453 	return old & 0xf;
454 }
455 
/*
 * Enable (unmask) the given SDMA interrupt sources for this port by
 * setting their bits in the shared interrupt mask register.  Ctlr 1's
 * bits sit 8 above ctlr 0's.
 */
static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
{
	u32 cur;

	pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);

	if (pi->mirror_regs)
		cur = pi->shared_regs->SDMA_INTR_MASK_m;
	else
		cur = readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	cur |= pi->port.line ? (mask << 8) : mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = cur;
	writel(cur, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
}
474 
/*
 * Ack this port's SDMA interrupts by zeroing its byte of the interrupt
 * cause register (port line doubles as the byte offset).
 */
mpsc_sdma_intr_ack(struct mpsc_port_info * pi)475 static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
476 {
477 	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
478 
479 	if (pi->mirror_regs)
480 		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
	/* Byte write clears only this port's cause bits */
481 	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
482 			+ pi->port.line);
483 }
484 
/* Point the SDMA current Rx desc pointer (SCRDP) at 'rxre_p' (a phys addr). */
mpsc_sdma_set_rx_ring(struct mpsc_port_info * pi,struct mpsc_rx_desc * rxre_p)485 static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
486 		struct mpsc_rx_desc *rxre_p)
487 {
488 	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
489 		pi->port.line, (u32)rxre_p);
490 
491 	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
492 }
493 
/*
 * Point both the first (SFTDP) and current (SCTDP) SDMA Tx desc pointers
 * at 'txre_p' (a phys addr).
 */
mpsc_sdma_set_tx_ring(struct mpsc_port_info * pi,struct mpsc_tx_desc * txre_p)494 static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
495 		struct mpsc_tx_desc *txre_p)
496 {
497 	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
498 	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
499 }
500 
/*
 * Issue an SDMA command: OR 'val' into SDMA_SDCM, or clear the whole
 * register when 'val' is 0.  Barriers order the MMIO write against
 * surrounding descriptor/memory accesses.
 */
static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
{
	u32 cmd;

	cmd = readl(pi->sdma_base + SDMA_SDCM);
	cmd = val ? (cmd | val) : 0;

	wmb();
	writel(cmd, pi->sdma_base + SDMA_SDCM);
	wmb();
}
514 
/* Nonzero while the SDMA Tx demand bit (SDCM TXD) is set, i.e. Tx running. */
mpsc_sdma_tx_active(struct mpsc_port_info * pi)515 static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
516 {
517 	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
518 }
519 
/*
 * Kick the Tx engine if it's idle and the tail descriptor has data
 * pending (owner bit set).  Descriptor is synced/invalidated first
 * because the SDMA runs with coherency disabled (see NOTES above).
 */
mpsc_sdma_start_tx(struct mpsc_port_info * pi)520 static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
521 {
522 	struct mpsc_tx_desc *txre, *txre_p;
523 
524 	/* If tx isn't running & there's a desc ready to go, start it */
525 	if (!mpsc_sdma_tx_active(pi)) {
526 		txre = (struct mpsc_tx_desc *)(pi->txr
527 				+ (pi->txr_tail * MPSC_TXRE_SIZE));
528 		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
529 				DMA_FROM_DEVICE);
530 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
531 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
532 			invalidate_dcache_range((ulong)txre,
533 					(ulong)txre + MPSC_TXRE_SIZE);
534 #endif
535 
		/* Owner bit set => desc belongs to SDMA => data to send */
536 		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
537 			txre_p = (struct mpsc_tx_desc *)
538 				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));
539 
540 			mpsc_sdma_set_tx_ring(pi, txre_p);
541 			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
542 		}
543 	}
544 }
545 
/*
 * Hard-stop the SDMA channel: abort in-flight transfers, clear the
 * ring pointers, then mask and ack its interrupts.
 */
mpsc_sdma_stop(struct mpsc_port_info * pi)546 static void mpsc_sdma_stop(struct mpsc_port_info *pi)
547 {
548 	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
549 
550 	/* Abort any SDMA transfers */
551 	mpsc_sdma_cmd(pi, 0);
552 	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
553 
554 	/* Clear the SDMA current and first TX and RX pointers */
555 	mpsc_sdma_set_tx_ring(pi, NULL);
556 	mpsc_sdma_set_rx_ring(pi, NULL);
557 
558 	/* Disable interrupts */
559 	mpsc_sdma_intr_mask(pi, 0xf);
560 	mpsc_sdma_intr_ack(pi);
561 }
562 
563 /*
564  ******************************************************************************
565  *
566  * Multi-Protocol Serial Controller Routines (MPSC)
567  *
568  ******************************************************************************
569  */
570 
/*
 * Bring the MPSC itself up: route clocks, select UART mode, program the
 * default baud rate, and zero the channel registers.  The MRR/RCRR/TCRR
 * routing regs are shared between ctlrs, hence the mirrored variants.
 */
mpsc_hw_init(struct mpsc_port_info * pi)571 static void mpsc_hw_init(struct mpsc_port_info *pi)
572 {
573 	u32	v;
574 
575 	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);
576 
577 	/* Set up clock routing */
578 	if (pi->mirror_regs) {
579 		v = pi->shared_regs->MPSC_MRR_m;
580 		v &= ~0x1c7;
581 		pi->shared_regs->MPSC_MRR_m = v;
582 		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
583 
584 		v = pi->shared_regs->MPSC_RCRR_m;
585 		v = (v & ~0xf0f) | 0x100;
586 		pi->shared_regs->MPSC_RCRR_m = v;
587 		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
588 
589 		v = pi->shared_regs->MPSC_TCRR_m;
590 		v = (v & ~0xf0f) | 0x100;
591 		pi->shared_regs->MPSC_TCRR_m = v;
592 		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
593 	} else {
		/* Same routing values, but read back from hw directly */
594 		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
595 		v &= ~0x1c7;
596 		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
597 
598 		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
599 		v = (v & ~0xf0f) | 0x100;
600 		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
601 
602 		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
603 		v = (v & ~0xf0f) | 0x100;
604 		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
605 	}
606 
607 	/* Put MPSC in UART mode & enable Tx/Rx engines */
608 	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);
609 
610 	/* No preamble, 16x divider, low-latency, */
611 	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
612 	mpsc_set_baudrate(pi, pi->default_baud);
613 
614 	if (pi->mirror_regs) {
615 		pi->MPSC_CHR_1_m = 0;
616 		pi->MPSC_CHR_2_m = 0;
617 	}
	/* Reset channel regs; CHR_3 gets the max-idle value from platform */
618 	writel(0, pi->mpsc_base + MPSC_CHR_1);
619 	writel(0, pi->mpsc_base + MPSC_CHR_2);
620 	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
621 	writel(0, pi->mpsc_base + MPSC_CHR_4);
622 	writel(0, pi->mpsc_base + MPSC_CHR_5);
623 	writel(0, pi->mpsc_base + MPSC_CHR_6);
624 	writel(0, pi->mpsc_base + MPSC_CHR_7);
625 	writel(0, pi->mpsc_base + MPSC_CHR_8);
626 	writel(0, pi->mpsc_base + MPSC_CHR_9);
627 	writel(0, pi->mpsc_base + MPSC_CHR_10);
628 }
629 
/*
 * Put the receiver into hunt mode (CHR_2 EH bit).  When CHR_2 is
 * readable, poll until the hw clears EH; otherwise just delay.
 */
mpsc_enter_hunt(struct mpsc_port_info * pi)630 static void mpsc_enter_hunt(struct mpsc_port_info *pi)
631 {
632 	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
633 
634 	if (pi->mirror_regs) {
635 		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
636 			pi->mpsc_base + MPSC_CHR_2);
637 		/* Erratum prevents reading CHR_2 so just delay for a while */
638 		udelay(100);
639 	} else {
640 		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
641 				pi->mpsc_base + MPSC_CHR_2);
642 
643 		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
644 			udelay(10);
645 	}
646 }
647 
mpsc_freeze(struct mpsc_port_info * pi)648 static void mpsc_freeze(struct mpsc_port_info *pi)
649 {
650 	u32	v;
651 
652 	pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
653 
654 	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
655 		readl(pi->mpsc_base + MPSC_MPCR);
656 	v |= MPSC_MPCR_FRZ;
657 
658 	if (pi->mirror_regs)
659 		pi->MPSC_MPCR_m = v;
660 	writel(v, pi->mpsc_base + MPSC_MPCR);
661 }
662 
mpsc_unfreeze(struct mpsc_port_info * pi)663 static void mpsc_unfreeze(struct mpsc_port_info *pi)
664 {
665 	u32	v;
666 
667 	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
668 		readl(pi->mpsc_base + MPSC_MPCR);
669 	v &= ~MPSC_MPCR_FRZ;
670 
671 	if (pi->mirror_regs)
672 		pi->MPSC_MPCR_m = v;
673 	writel(v, pi->mpsc_base + MPSC_MPCR);
674 
675 	pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
676 }
677 
/* Program the character length (MPSC_MPCR_CL_*) into MPCR bits 13:12. */
static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
{
	u32 mpcr;

	pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);

	if (pi->mirror_regs)
		mpcr = pi->MPSC_MPCR_m;
	else
		mpcr = readl(pi->mpsc_base + MPSC_MPCR);

	mpcr &= ~(0x3 << 12);
	mpcr |= (len & 0x3) << 12;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = mpcr;	/* Keep mirror in sync with hw */
	writel(mpcr, pi->mpsc_base + MPSC_MPCR);
}
692 
/* Program the stop-bit count (MPSC_MPCR_SBL_*) into MPCR bit 14. */
static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
{
	u32 mpcr;

	pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
		pi->port.line, len);

	if (pi->mirror_regs)
		mpcr = pi->MPSC_MPCR_m;
	else
		mpcr = readl(pi->mpsc_base + MPSC_MPCR);

	mpcr &= ~(1 << 14);
	mpcr |= (len & 0x1) << 14;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = mpcr;	/* Keep mirror in sync with hw */
	writel(mpcr, pi->mpsc_base + MPSC_MPCR);
}
709 
/*
 * Program the parity mode (MPSC_CHR_2_PAR_*).  The same 2-bit value
 * goes into both the Tx (CHR_2 bits 3:2) and Rx (bits 19:18) fields.
 */
static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
{
	u32 chr2;

	pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);

	if (pi->mirror_regs)
		chr2 = pi->MPSC_CHR_2_m;
	else
		chr2 = readl(pi->mpsc_base + MPSC_CHR_2);

	p &= 0x3;
	chr2 &= ~0xc000c;
	chr2 |= (p << 18) | (p << 2);

	if (pi->mirror_regs)
		pi->MPSC_CHR_2_m = chr2;	/* Keep mirror in sync with hw */
	writel(chr2, pi->mpsc_base + MPSC_CHR_2);
}
726 
727 /*
728  ******************************************************************************
729  *
730  * Driver Init Routines
731  *
732  ******************************************************************************
733  */
734 
/*
 * One-time hardware bring-up for a port: BRG first (clock source, then
 * enable), then SDMA (burst one cacheline, then quiesce), then the MPSC.
 */
mpsc_init_hw(struct mpsc_port_info * pi)735 static void mpsc_init_hw(struct mpsc_port_info *pi)
736 {
737 	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);
738 
739 	mpsc_brg_init(pi, pi->brg_clk_src);
740 	mpsc_brg_enable(pi);
741 	mpsc_sdma_init(pi, dma_get_cache_alignment());	/* burst a cacheline */
742 	mpsc_sdma_stop(pi);
743 	mpsc_hw_init(pi);
744 }
745 
mpsc_alloc_ring_mem(struct mpsc_port_info * pi)746 static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
747 {
748 	int rc = 0;
749 
750 	pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
751 		pi->port.line);
752 
753 	if (!pi->dma_region) {
754 		if (!dma_set_mask(pi->port.dev, 0xffffffff)) {
755 			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
756 			rc = -ENXIO;
757 		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
758 						MPSC_DMA_ALLOC_SIZE,
759 						&pi->dma_region_p, GFP_KERNEL))
760 				== NULL) {
761 			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
762 			rc = -ENOMEM;
763 		}
764 	}
765 
766 	return rc;
767 }
768 
mpsc_free_ring_mem(struct mpsc_port_info * pi)769 static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
770 {
771 	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
772 
773 	if (pi->dma_region) {
774 		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
775 				pi->dma_region, pi->dma_region_p);
776 		pi->dma_region = NULL;
777 		pi->dma_region_p = (dma_addr_t)NULL;
778 	}
779 }
780 
/*
 * Carve the preallocated DMA region into the rx descriptor ring, rx
 * buffers, tx descriptor ring and tx buffers, link each ring into a
 * circle, then flush everything so the (non-coherent) SDMA sees it.
 */
mpsc_init_rings(struct mpsc_port_info * pi)781 static void mpsc_init_rings(struct mpsc_port_info *pi)
782 {
783 	struct mpsc_rx_desc *rxre;
784 	struct mpsc_tx_desc *txre;
785 	dma_addr_t dp, dp_p;
786 	u8 *bp, *bp_p;
787 	int i;
788 
789 	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);
790 
791 	BUG_ON(pi->dma_region == NULL);
792 
793 	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);
794 
795 	/*
796 	 * Descriptors & buffers are multiples of cacheline size and must be
797 	 * cacheline aligned.
798 	 */
799 	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
800 	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());
801 
802 	/*
803 	 * Partition dma region into rx ring descriptor, rx buffers,
804 	 * tx ring descriptors, and tx buffers.
805 	 */
806 	pi->rxr = dp;
807 	pi->rxr_p = dp_p;
808 	dp += MPSC_RXR_SIZE;
809 	dp_p += MPSC_RXR_SIZE;
810 
811 	pi->rxb = (u8 *)dp;
812 	pi->rxb_p = (u8 *)dp_p;
813 	dp += MPSC_RXB_SIZE;
814 	dp_p += MPSC_RXB_SIZE;
815 
816 	pi->rxr_posn = 0;
817 
818 	pi->txr = dp;
819 	pi->txr_p = dp_p;
820 	dp += MPSC_TXR_SIZE;
821 	dp_p += MPSC_TXR_SIZE;
822 
823 	pi->txb = (u8 *)dp;
824 	pi->txb_p = (u8 *)dp_p;
825 
826 	pi->txr_head = 0;
827 	pi->txr_tail = 0;
828 
	/* Init rx ring descriptors: give every desc to the SDMA (O bit),
	 * request intr on completion (EI), mark first/last frag (F/L) */
830 	dp = pi->rxr;
831 	dp_p = pi->rxr_p;
832 	bp = pi->rxb;
833 	bp_p = pi->rxb_p;
834 
835 	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
836 		rxre = (struct mpsc_rx_desc *)dp;
837 
838 		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
839 		rxre->bytecnt = cpu_to_be16(0);
840 		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
841 				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
842 				| SDMA_DESC_CMDSTAT_L);
843 		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
844 		rxre->buf_ptr = cpu_to_be32(bp_p);
845 
846 		dp += MPSC_RXRE_SIZE;
847 		dp_p += MPSC_RXRE_SIZE;
848 		bp += MPSC_RXBE_SIZE;
849 		bp_p += MPSC_RXBE_SIZE;
850 	}
851 	rxre->link = cpu_to_be32(pi->rxr_p);	/* Wrap last back to first */
852 
	/* Init tx ring descriptors; cmdstat stays 0 until data is queued */
854 	dp = pi->txr;
855 	dp_p = pi->txr_p;
856 	bp = pi->txb;
857 	bp_p = pi->txb_p;
858 
859 	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
860 		txre = (struct mpsc_tx_desc *)dp;
861 
862 		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
863 		txre->buf_ptr = cpu_to_be32(bp_p);
864 
865 		dp += MPSC_TXRE_SIZE;
866 		dp_p += MPSC_TXRE_SIZE;
867 		bp += MPSC_TXBE_SIZE;
868 		bp_p += MPSC_TXBE_SIZE;
869 	}
870 	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */
871 
	/* Push the whole region out to memory for the non-coherent SDMA */
872 	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
873 			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
874 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
875 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
876 			flush_dcache_range((ulong)pi->dma_region,
877 					(ulong)pi->dma_region
878 					+ MPSC_DMA_ALLOC_SIZE);
879 #endif
880 
881 	return;
882 }
883 
/*
 * Forget the ring carve-up (pointers/indices only); the backing DMA
 * region itself is freed separately by mpsc_free_ring_mem().
 */
mpsc_uninit_rings(struct mpsc_port_info * pi)884 static void mpsc_uninit_rings(struct mpsc_port_info *pi)
885 {
886 	pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
887 
888 	BUG_ON(pi->dma_region == NULL);
889 
890 	pi->rxr = 0;
891 	pi->rxr_p = 0;
892 	pi->rxb = NULL;
893 	pi->rxb_p = NULL;
894 	pi->rxr_posn = 0;
895 
896 	pi->txr = 0;
897 	pi->txr_p = 0;
898 	pi->txb = NULL;
899 	pi->txb_p = NULL;
900 	pi->txr_head = 0;
901 	pi->txr_tail = 0;
902 }
903 
mpsc_make_ready(struct mpsc_port_info * pi)904 static int mpsc_make_ready(struct mpsc_port_info *pi)
905 {
906 	int rc;
907 
908 	pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
909 
910 	if (!pi->ready) {
911 		mpsc_init_hw(pi);
912 		rc = mpsc_alloc_ring_mem(pi);
913 		if (rc)
914 			return rc;
915 		mpsc_init_rings(pi);
916 		pi->ready = 1;
917 	}
918 
919 	return 0;
920 }
921 
922 #ifdef CONFIG_CONSOLE_POLL
923 static int serial_polled;
924 #endif
925 
926 /*
927  ******************************************************************************
928  *
929  * Interrupt Handling Routines
930  *
931  ******************************************************************************
932  */
933 
/*
 * mpsc_rx_intr - service completed Rx descriptors for one port.
 *
 * Walks the Rx descriptor ring starting at pi->rxr_posn, pushing received
 * bytes into the tty flip buffer and handing each descriptor back to the
 * SDMA (ownership bit re-set).  Called with pi->port.lock held; *flags are
 * the caller's saved irq flags, passed by pointer because the lock is
 * dropped around tty_flip_buffer_push().
 *
 * Returns 1 if at least one descriptor was processed, 0 otherwise.
 */
static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
{
	struct mpsc_rx_desc *rxre;
	struct tty_port *port = &pi->port.state->port;
	u32	cmdstat, bytes_in, i;
	int	rc = 0;
	u8	*bp;
	char	flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));

	/* Pull the descriptor out of DMA space before reading it */
	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed.
	 * The SDMA clears SDMA_DESC_CMDSTAT_O (ownership) when it is done
	 * with a descriptor.
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
				& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);
#ifdef CONFIG_CONSOLE_POLL
		/* Bail out if the kgdb poll path consumed the data */
		if (unlikely(serial_polled)) {
			serial_polled = 0;
			return 0;
		}
#endif
		/* Following use of tty struct directly is deprecated */
		if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
			if (port->low_latency) {
				/* Must drop port.lock while pushing to ldisc */
				spin_unlock_irqrestore(&pi->port.lock, *flags);
				tty_flip_buffer_push(port);
				spin_lock_irqsave(&pi->port.lock, *flags);
			}
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors.  For parity error, it is the last byte in
		 * the buffer that had the error.  As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			/* Only report conditions the user asked to see */
			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			/* First byte was eaten by sysrq handling */
			bp++;
			bytes_in--;
#ifdef CONFIG_CONSOLE_POLL
			if (unlikely(serial_polled)) {
				serial_polled = 0;
				return 0;
			}
#endif
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			/* Error frame: deliver a single byte with its flag */
			tty_insert_flip_char(port, *bp, flag);
		} else {
			/* Clean frame: deliver the whole buffer */
			for (i=0; i<bytes_in; i++)
				tty_insert_flip_char(port, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		/* Recycle the descriptor: zero count, give it back to SDMA */
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if its stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	/* Push collected bytes to the line discipline without the lock held */
	spin_unlock_irqrestore(&pi->port.lock, *flags);
	tty_flip_buffer_push(port);
	spin_lock_irqsave(&pi->port.lock, *flags);
	return rc;
}
1088 
/*
 * mpsc_setup_tx_desc - hand the Tx descriptor at txr_head to the SDMA.
 *
 * @count: number of bytes in the descriptor's buffer
 * @intr:  nonzero to request an interrupt when this descriptor completes
 *
 * The ownership bit is written last (after a wmb()) so the SDMA never sees
 * a descriptor whose byte counts are not yet visible.
 */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb();			/* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	/* Make the descriptor visible to the device */
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}
1111 
/*
 * mpsc_copy_tx_data - move pending Tx bytes into the DMA ring.
 *
 * Fills descriptor buffers from either port.x_char (sent in-band because
 * of the CHR_2-read erratum, see below) or the uart circular buffer,
 * until the descriptor ring is full or there is nothing left to send.
 * Callers in this file (mpsc_tx_intr, mpsc_start_tx) hold pi->tx_lock.
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			/* Copy at most one Tx buffer, without wrapping */
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
				UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		/* Flush the data buffer before giving it to the device */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}
1163 
/*
 * mpsc_tx_intr - reclaim completed Tx descriptors and queue more data.
 *
 * If the SDMA Tx engine is idle, walk the ring from txr_tail, accounting
 * transmitted bytes for each descriptor the SDMA has released (ownership
 * bit clear), then refill the ring and restart the engine.
 *
 * Returns 1 if any descriptor was reclaimed, 0 otherwise.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* Reclaim every descriptor the SDMA has finished with */
		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}
1211 
1212 /*
1213  * This is the driver's interrupt handler.  To avoid a race, we first clear
1214  * the interrupt, then handle any completed Rx/Tx descriptors.  When done
1215  * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1216  */
/*
 * mpsc_sdma_intr - top-level SDMA interrupt handler.
 *
 * Acks the interrupt first (to avoid losing events raised while we work),
 * then services any completed Rx and Tx descriptors.  @dev_id is the
 * mpsc_port_info registered with request_irq().
 */
static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
{
	struct mpsc_port_info *pi = dev_id;
	ulong iflags;
	int rc = IRQ_NONE;

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);

	spin_lock_irqsave(&pi->port.lock, iflags);
	mpsc_sdma_intr_ack(pi);
	/* Note: mpsc_rx_intr may temporarily drop port.lock via iflags */
	if (mpsc_rx_intr(pi, &iflags))
		rc = IRQ_HANDLED;
	if (mpsc_tx_intr(pi))
		rc = IRQ_HANDLED;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
	return rc;
}
1236 
1237 /*
1238  ******************************************************************************
1239  *
1240  * serial_core.c Interface routines
1241  *
1242  ******************************************************************************
1243  */
mpsc_tx_empty(struct uart_port * port)1244 static uint mpsc_tx_empty(struct uart_port *port)
1245 {
1246 	struct mpsc_port_info *pi =
1247 		container_of(port, struct mpsc_port_info, port);
1248 	ulong iflags;
1249 	uint rc;
1250 
1251 	spin_lock_irqsave(&pi->port.lock, iflags);
1252 	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1253 	spin_unlock_irqrestore(&pi->port.lock, iflags);
1254 
1255 	return rc;
1256 }
1257 
/* Intentionally empty: no way to set modem control lines on this hw AFAICT */
static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}
1262 
/*
 * mpsc_get_mctrl - read modem status from MPSC_CHR_10.
 *
 * Uses the mirrored register copy when the read erratum applies
 * (pi->mirror_regs).  Bit 0 of the status is reported as CTS and bit 1
 * as carrier detect; DSR is always reported asserted since the hardware
 * gives no indication of it.
 */
static uint mpsc_get_mctrl(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 mflags, status;

	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
		: readl(pi->mpsc_base + MPSC_CHR_10);

	mflags = 0;
	if (status & 0x1)	/* CHR_10 bit 0 -> CTS */
		mflags |= TIOCM_CTS;
	if (status & 0x2)	/* CHR_10 bit 1 -> carrier */
		mflags |= TIOCM_CAR;

	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
}
1280 
mpsc_stop_tx(struct uart_port * port)1281 static void mpsc_stop_tx(struct uart_port *port)
1282 {
1283 	struct mpsc_port_info *pi =
1284 		container_of(port, struct mpsc_port_info, port);
1285 
1286 	pr_debug("mpsc_stop_tx[%d]\n", port->line);
1287 
1288 	mpsc_freeze(pi);
1289 }
1290 
mpsc_start_tx(struct uart_port * port)1291 static void mpsc_start_tx(struct uart_port *port)
1292 {
1293 	struct mpsc_port_info *pi =
1294 		container_of(port, struct mpsc_port_info, port);
1295 	unsigned long iflags;
1296 
1297 	spin_lock_irqsave(&pi->tx_lock, iflags);
1298 
1299 	mpsc_unfreeze(pi);
1300 	mpsc_copy_tx_data(pi);
1301 	mpsc_sdma_start_tx(pi);
1302 
1303 	spin_unlock_irqrestore(&pi->tx_lock, iflags);
1304 
1305 	pr_debug("mpsc_start_tx[%d]\n", port->line);
1306 }
1307 
mpsc_start_rx(struct mpsc_port_info * pi)1308 static void mpsc_start_rx(struct mpsc_port_info *pi)
1309 {
1310 	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1311 
1312 	if (pi->rcv_data) {
1313 		mpsc_enter_hunt(pi);
1314 		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
1315 	}
1316 }
1317 
/*
 * mpsc_stop_rx - abort reception.
 *
 * Sets the receiver-abort bit in CHR_2, then aborts the SDMA Rx side.
 * When the read erratum applies (mirror_regs) we cannot poll CHR_2 for
 * the abort bit to self-clear, so a fixed delay is used instead.
 */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);

		/* Wait for the hardware to clear the abort bit */
		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}
1340 
mpsc_break_ctl(struct uart_port * port,int ctl)1341 static void mpsc_break_ctl(struct uart_port *port, int ctl)
1342 {
1343 	struct mpsc_port_info *pi =
1344 		container_of(port, struct mpsc_port_info, port);
1345 	ulong	flags;
1346 	u32	v;
1347 
1348 	v = ctl ? 0x00ff0000 : 0;
1349 
1350 	spin_lock_irqsave(&pi->port.lock, flags);
1351 	if (pi->mirror_regs)
1352 		pi->MPSC_CHR_1_m = v;
1353 	writel(v, pi->mpsc_base + MPSC_CHR_1);
1354 	spin_unlock_irqrestore(&pi->port.lock, flags);
1355 }
1356 
mpsc_startup(struct uart_port * port)1357 static int mpsc_startup(struct uart_port *port)
1358 {
1359 	struct mpsc_port_info *pi =
1360 		container_of(port, struct mpsc_port_info, port);
1361 	u32 flag = 0;
1362 	int rc;
1363 
1364 	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1365 		port->line, pi->port.irq);
1366 
1367 	if ((rc = mpsc_make_ready(pi)) == 0) {
1368 		/* Setup IRQ handler */
1369 		mpsc_sdma_intr_ack(pi);
1370 
1371 		/* If irq's are shared, need to set flag */
1372 		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
1373 			flag = IRQF_SHARED;
1374 
1375 		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
1376 					"mpsc-sdma", pi))
1377 			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
1378 					pi->port.irq);
1379 
1380 		mpsc_sdma_intr_unmask(pi, 0xf);
1381 		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
1382 					+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
1383 	}
1384 
1385 	return rc;
1386 }
1387 
mpsc_shutdown(struct uart_port * port)1388 static void mpsc_shutdown(struct uart_port *port)
1389 {
1390 	struct mpsc_port_info *pi =
1391 		container_of(port, struct mpsc_port_info, port);
1392 
1393 	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
1394 
1395 	mpsc_sdma_stop(pi);
1396 	free_irq(pi->port.irq, pi);
1397 }
1398 
mpsc_set_termios(struct uart_port * port,struct ktermios * termios,struct ktermios * old)1399 static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
1400 		 struct ktermios *old)
1401 {
1402 	struct mpsc_port_info *pi =
1403 		container_of(port, struct mpsc_port_info, port);
1404 	u32 baud;
1405 	ulong flags;
1406 	u32 chr_bits, stop_bits, par;
1407 
1408 	switch (termios->c_cflag & CSIZE) {
1409 	case CS5:
1410 		chr_bits = MPSC_MPCR_CL_5;
1411 		break;
1412 	case CS6:
1413 		chr_bits = MPSC_MPCR_CL_6;
1414 		break;
1415 	case CS7:
1416 		chr_bits = MPSC_MPCR_CL_7;
1417 		break;
1418 	case CS8:
1419 	default:
1420 		chr_bits = MPSC_MPCR_CL_8;
1421 		break;
1422 	}
1423 
1424 	if (termios->c_cflag & CSTOPB)
1425 		stop_bits = MPSC_MPCR_SBL_2;
1426 	else
1427 		stop_bits = MPSC_MPCR_SBL_1;
1428 
1429 	par = MPSC_CHR_2_PAR_EVEN;
1430 	if (termios->c_cflag & PARENB)
1431 		if (termios->c_cflag & PARODD)
1432 			par = MPSC_CHR_2_PAR_ODD;
1433 #ifdef	CMSPAR
1434 		if (termios->c_cflag & CMSPAR) {
1435 			if (termios->c_cflag & PARODD)
1436 				par = MPSC_CHR_2_PAR_MARK;
1437 			else
1438 				par = MPSC_CHR_2_PAR_SPACE;
1439 		}
1440 #endif
1441 
1442 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
1443 
1444 	spin_lock_irqsave(&pi->port.lock, flags);
1445 
1446 	uart_update_timeout(port, termios->c_cflag, baud);
1447 
1448 	mpsc_set_char_length(pi, chr_bits);
1449 	mpsc_set_stop_bit_length(pi, stop_bits);
1450 	mpsc_set_parity(pi, par);
1451 	mpsc_set_baudrate(pi, baud);
1452 
1453 	/* Characters/events to read */
1454 	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;
1455 
1456 	if (termios->c_iflag & INPCK)
1457 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
1458 			| SDMA_DESC_CMDSTAT_FR;
1459 
1460 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1461 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
1462 
1463 	/* Characters/events to ignore */
1464 	pi->port.ignore_status_mask = 0;
1465 
1466 	if (termios->c_iflag & IGNPAR)
1467 		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
1468 			| SDMA_DESC_CMDSTAT_FR;
1469 
1470 	if (termios->c_iflag & IGNBRK) {
1471 		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;
1472 
1473 		if (termios->c_iflag & IGNPAR)
1474 			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
1475 	}
1476 
1477 	if ((termios->c_cflag & CREAD)) {
1478 		if (!pi->rcv_data) {
1479 			pi->rcv_data = 1;
1480 			mpsc_start_rx(pi);
1481 		}
1482 	} else if (pi->rcv_data) {
1483 		mpsc_stop_rx(port);
1484 		pi->rcv_data = 0;
1485 	}
1486 
1487 	spin_unlock_irqrestore(&pi->port.lock, flags);
1488 }
1489 
mpsc_type(struct uart_port * port)1490 static const char *mpsc_type(struct uart_port *port)
1491 {
1492 	pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
1493 	return MPSC_DRIVER_NAME;
1494 }
1495 
/* No per-port resources to claim here yet. */
static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}
1501 
mpsc_release_port(struct uart_port * port)1502 static void mpsc_release_port(struct uart_port *port)
1503 {
1504 	struct mpsc_port_info *pi =
1505 		container_of(port, struct mpsc_port_info, port);
1506 
1507 	if (pi->ready) {
1508 		mpsc_uninit_rings(pi);
1509 		mpsc_free_ring_mem(pi);
1510 		pi->ready = 0;
1511 	}
1512 }
1513 
/* Intentionally empty: nothing to configure/probe for this port type. */
static void mpsc_config_port(struct uart_port *port, int flags)
{
}
1517 
mpsc_verify_port(struct uart_port * port,struct serial_struct * ser)1518 static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1519 {
1520 	struct mpsc_port_info *pi =
1521 		container_of(port, struct mpsc_port_info, port);
1522 	int rc = 0;
1523 
1524 	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1525 
1526 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1527 		rc = -EINVAL;
1528 	else if (pi->port.irq != ser->irq)
1529 		rc = -EINVAL;
1530 	else if (ser->io_type != SERIAL_IO_MEM)
1531 		rc = -EINVAL;
1532 	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1533 		rc = -EINVAL;
1534 	else if ((void *)pi->port.mapbase != ser->iomem_base)
1535 		rc = -EINVAL;
1536 	else if (pi->port.iobase != ser->port)
1537 		rc = -EINVAL;
1538 	else if (ser->hub6 != 0)
1539 		rc = -EINVAL;
1540 
1541 	return rc;
1542 }
1543 #ifdef CONFIG_CONSOLE_POLL
1544 /* Serial polling routines for writing and reading from the uart while
1545  * in an interrupt or debug context.
1546  */
1547 
1548 static char poll_buf[2048];
1549 static int poll_ptr;
1550 static int poll_cnt;
1551 static void mpsc_put_poll_char(struct uart_port *port,
1552 							   unsigned char c);
1553 
/*
 * mpsc_get_poll_char - kgdb/polled-mode character fetch.
 *
 * Drains completed Rx descriptors into poll_buf and returns one byte per
 * call, replaying buffered bytes on subsequent calls.  Sets serial_polled
 * so the normal interrupt path (mpsc_rx_intr) backs off if it races with
 * us.  Spins until at least one byte arrives.
 */
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	struct mpsc_rx_desc *rxre;
	u32	cmdstat, bytes_in, i;
	u8	*bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	/* Replay any bytes buffered by an earlier call */
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	/* Busy-wait until the hardware delivers at least one byte */
	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
		       (pi->rxr_posn*MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
			       MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
			(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
		       !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
			 SDMA_DESC_CMDSTAT_O)){
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *) bp,
				       MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif
			/* Error frame: keep one byte; clean frame: keep all */
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
			 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
				!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			/* Recycle the descriptor back to the SDMA */
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
						    SDMA_DESC_CMDSTAT_EI |
						    SDMA_DESC_CMDSTAT_F |
						    SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
				       MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
					   (ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
				       (pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
				       MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if its stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}
1652 
1653 
/*
 * mpsc_put_poll_char - kgdb/polled-mode character output.
 *
 * Writes the byte into CHR_1 and triggers transmission via the TTCS bit
 * in CHR_2, then busy-waits for the hardware to clear TTCS.
 */
static void mpsc_put_poll_char(struct uart_port *port,
			 unsigned char c)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 data;

	/* NOTE(review): this MPCR read result is discarded — presumably a
	 * deliberate dummy read for hardware ordering; confirm before
	 * removing. */
	data = readl(pi->mpsc_base + MPSC_MPCR);
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	/* Spin until the transmit-character request completes */
	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
}
1671 #endif
1672 
/* serial_core operations vector wiring the MPSC callbacks together. */
static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char = mpsc_get_poll_char,
	.poll_put_char = mpsc_put_poll_char,
#endif
};
1694 
1695 /*
1696  ******************************************************************************
1697  *
1698  * Console Interface Routines
1699  *
1700  ******************************************************************************
1701  */
1702 
1703 #ifdef CONFIG_SERIAL_MPSC_CONSOLE
/*
 * mpsc_console_write - polled console output.
 *
 * First drains any in-flight Tx descriptors (reclaiming them via
 * mpsc_tx_intr), then sends the message one descriptor buffer at a time,
 * busy-waiting for each to complete.  '\n' is expanded to "\n\r".
 * Serialized with the interrupt Tx path by pi->tx_lock.
 */
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	/* Wait for and reclaim any descriptors still owned by the SDMA */
	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		/* Fill one descriptor buffer (bp = start, dp = write cursor) */
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		/* No completion interrupt (arg 3 == 0); we poll instead */
		mpsc_setup_tx_desc(pi, i, 0);
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}
1764 
mpsc_console_setup(struct console * co,char * options)1765 static int __init mpsc_console_setup(struct console *co, char *options)
1766 {
1767 	struct mpsc_port_info *pi;
1768 	int baud, bits, parity, flow;
1769 
1770 	pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1771 
1772 	if (co->index >= MPSC_NUM_CTLRS)
1773 		co->index = 0;
1774 
1775 	pi = &mpsc_ports[co->index];
1776 
1777 	baud = pi->default_baud;
1778 	bits = pi->default_bits;
1779 	parity = pi->default_parity;
1780 	flow = pi->default_flow;
1781 
1782 	if (!pi->port.ops)
1783 		return -ENODEV;
1784 
1785 	spin_lock_init(&pi->port.lock);	/* Temporary fix--copied from 8250.c */
1786 
1787 	if (options)
1788 		uart_parse_options(options, &baud, &parity, &bits, &flow);
1789 
1790 	return uart_set_options(&pi->port, co, baud, parity, bits, flow);
1791 }
1792 
/* Console descriptor registered with the printk layer. */
static struct console mpsc_console = {
	.name	= MPSC_DEV_NAME,
	.write	= mpsc_console_write,
	.device	= uart_console_device,
	.setup	= mpsc_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,	/* -1: console= command line selects the port */
	.data	= &mpsc_reg,
};
1802 
mpsc_late_console_init(void)1803 static int __init mpsc_late_console_init(void)
1804 {
1805 	pr_debug("mpsc_late_console_init: Enter\n");
1806 
1807 	if (!(mpsc_console.flags & CON_ENABLED))
1808 		register_console(&mpsc_console);
1809 	return 0;
1810 }
1811 
1812 late_initcall(mpsc_late_console_init);
1813 
1814 #define MPSC_CONSOLE	&mpsc_console
1815 #else
1816 #define MPSC_CONSOLE	NULL
1817 #endif
1818 /*
1819  ******************************************************************************
1820  *
1821  * Dummy Platform Driver to extract & map shared register regions
1822  *
1823  ******************************************************************************
1824  */
/* Common warning for a missing or unclaimable platform resource @s. */
static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}
1829 
mpsc_shared_map_regs(struct platform_device * pd)1830 static int mpsc_shared_map_regs(struct platform_device *pd)
1831 {
1832 	struct resource	*r;
1833 
1834 	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1835 					MPSC_ROUTING_BASE_ORDER))
1836 			&& request_mem_region(r->start,
1837 				MPSC_ROUTING_REG_BLOCK_SIZE,
1838 				"mpsc_routing_regs")) {
1839 		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
1840 				MPSC_ROUTING_REG_BLOCK_SIZE);
1841 		mpsc_shared_regs.mpsc_routing_base_p = r->start;
1842 	} else {
1843 		mpsc_resource_err("MPSC routing base");
1844 		return -ENOMEM;
1845 	}
1846 
1847 	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1848 					MPSC_SDMA_INTR_BASE_ORDER))
1849 			&& request_mem_region(r->start,
1850 				MPSC_SDMA_INTR_REG_BLOCK_SIZE,
1851 				"sdma_intr_regs")) {
1852 		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
1853 			MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1854 		mpsc_shared_regs.sdma_intr_base_p = r->start;
1855 	} else {
1856 		iounmap(mpsc_shared_regs.mpsc_routing_base);
1857 		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1858 				MPSC_ROUTING_REG_BLOCK_SIZE);
1859 		mpsc_resource_err("SDMA intr base");
1860 		return -ENOMEM;
1861 	}
1862 
1863 	return 0;
1864 }
1865 
mpsc_shared_unmap_regs(void)1866 static void mpsc_shared_unmap_regs(void)
1867 {
1868 	if (mpsc_shared_regs.mpsc_routing_base) {
1869 		iounmap(mpsc_shared_regs.mpsc_routing_base);
1870 		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1871 				MPSC_ROUTING_REG_BLOCK_SIZE);
1872 	}
1873 	if (mpsc_shared_regs.sdma_intr_base) {
1874 		iounmap(mpsc_shared_regs.sdma_intr_base);
1875 		release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
1876 				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1877 	}
1878 
1879 	mpsc_shared_regs.mpsc_routing_base = NULL;
1880 	mpsc_shared_regs.sdma_intr_base = NULL;
1881 
1882 	mpsc_shared_regs.mpsc_routing_base_p = 0;
1883 	mpsc_shared_regs.sdma_intr_base_p = 0;
1884 }
1885 
mpsc_shared_drv_probe(struct platform_device * dev)1886 static int mpsc_shared_drv_probe(struct platform_device *dev)
1887 {
1888 	struct mpsc_shared_pdata	*pdata;
1889 	int rc;
1890 
1891 	if (dev->id != 0)
1892 		return -ENODEV;
1893 
1894 	rc = mpsc_shared_map_regs(dev);
1895 	if (rc)
1896 		return rc;
1897 
1898 	pdata = dev_get_platdata(&dev->dev);
1899 
1900 	mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
1901 	mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
1902 	mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
1903 	mpsc_shared_regs.SDMA_INTR_CAUSE_m = pdata->intr_cause_val;
1904 	mpsc_shared_regs.SDMA_INTR_MASK_m = pdata->intr_mask_val;
1905 
1906 	return 0;
1907 }
1908 
mpsc_shared_drv_remove(struct platform_device * dev)1909 static int mpsc_shared_drv_remove(struct platform_device *dev)
1910 {
1911 	if (dev->id != 0)
1912 		return -ENODEV;
1913 
1914 	mpsc_shared_unmap_regs();
1915 	mpsc_shared_regs.MPSC_MRR_m = 0;
1916 	mpsc_shared_regs.MPSC_RCRR_m = 0;
1917 	mpsc_shared_regs.MPSC_TCRR_m = 0;
1918 	mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
1919 	mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
1920 
1921 	return 0;
1922 }
1923 
/* Platform driver for the shared-register pseudo-device (see probe). */
static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name	= MPSC_SHARED_NAME,
	},
};
1931 
1932 /*
1933  ******************************************************************************
1934  *
1935  * Driver Interface Routines
1936  *
1937  ******************************************************************************
1938  */
/* uart_driver registration record for all MPSC ports. */
static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,	/* NULL unless console support built */
};
1948 
mpsc_drv_map_regs(struct mpsc_port_info * pi,struct platform_device * pd)1949 static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
1950 		struct platform_device *pd)
1951 {
1952 	struct resource	*r;
1953 
1954 	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
1955 			&& request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
1956 			"mpsc_regs")) {
1957 		pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1958 		pi->mpsc_base_p = r->start;
1959 	} else {
1960 		mpsc_resource_err("MPSC base");
1961 		goto err;
1962 	}
1963 
1964 	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1965 					MPSC_SDMA_BASE_ORDER))
1966 			&& request_mem_region(r->start,
1967 				MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1968 		pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1969 		pi->sdma_base_p = r->start;
1970 	} else {
1971 		mpsc_resource_err("SDMA base");
1972 		goto err;
1973 	}
1974 
1975 	if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
1976 			&& request_mem_region(r->start,
1977 				MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
1978 		pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1979 		pi->brg_base_p = r->start;
1980 	} else {
1981 		mpsc_resource_err("BRG base");
1982 		goto err;
1983 	}
1984 	return 0;
1985 
1986 err:
1987 	if (pi->sdma_base) {
1988 		iounmap(pi->sdma_base);
1989 		pi->sdma_base = NULL;
1990 	}
1991 	if (pi->mpsc_base) {
1992 		iounmap(pi->mpsc_base);
1993 		pi->mpsc_base = NULL;
1994 	}
1995 	return -ENOMEM;
1996 }
1997 
mpsc_drv_unmap_regs(struct mpsc_port_info * pi)1998 static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
1999 {
2000 	if (pi->mpsc_base) {
2001 		iounmap(pi->mpsc_base);
2002 		release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
2003 	}
2004 	if (pi->sdma_base) {
2005 		iounmap(pi->sdma_base);
2006 		release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
2007 	}
2008 	if (pi->brg_base) {
2009 		iounmap(pi->brg_base);
2010 		release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
2011 	}
2012 
2013 	pi->mpsc_base = NULL;
2014 	pi->sdma_base = NULL;
2015 	pi->brg_base = NULL;
2016 
2017 	pi->mpsc_base_p = 0;
2018 	pi->sdma_base_p = 0;
2019 	pi->brg_base_p = 0;
2020 }
2021 
mpsc_drv_get_platform_data(struct mpsc_port_info * pi,struct platform_device * pd,int num)2022 static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
2023 		struct platform_device *pd, int num)
2024 {
2025 	struct mpsc_pdata	*pdata;
2026 
2027 	pdata = dev_get_platdata(&pd->dev);
2028 
2029 	pi->port.uartclk = pdata->brg_clk_freq;
2030 	pi->port.iotype = UPIO_MEM;
2031 	pi->port.line = num;
2032 	pi->port.type = PORT_MPSC;
2033 	pi->port.fifosize = MPSC_TXBE_SIZE;
2034 	pi->port.membase = pi->mpsc_base;
2035 	pi->port.mapbase = (ulong)pi->mpsc_base;
2036 	pi->port.ops = &mpsc_pops;
2037 
2038 	pi->mirror_regs = pdata->mirror_regs;
2039 	pi->cache_mgmt = pdata->cache_mgmt;
2040 	pi->brg_can_tune = pdata->brg_can_tune;
2041 	pi->brg_clk_src = pdata->brg_clk_src;
2042 	pi->mpsc_max_idle = pdata->max_idle;
2043 	pi->default_baud = pdata->default_baud;
2044 	pi->default_bits = pdata->default_bits;
2045 	pi->default_parity = pdata->default_parity;
2046 	pi->default_flow = pdata->default_flow;
2047 
2048 	/* Initial values of mirrored regs */
2049 	pi->MPSC_CHR_1_m = pdata->chr_1_val;
2050 	pi->MPSC_CHR_2_m = pdata->chr_2_val;
2051 	pi->MPSC_CHR_10_m = pdata->chr_10_val;
2052 	pi->MPSC_MPCR_m = pdata->mpcr_val;
2053 	pi->BRG_BCR_m = pdata->bcr_val;
2054 
2055 	pi->shared_regs = &mpsc_shared_regs;
2056 
2057 	pi->port.irq = platform_get_irq(pd, 0);
2058 }
2059 
mpsc_drv_probe(struct platform_device * dev)2060 static int mpsc_drv_probe(struct platform_device *dev)
2061 {
2062 	struct mpsc_port_info *pi;
2063 	int rc;
2064 
2065 	dev_dbg(&dev->dev, "mpsc_drv_probe: Adding MPSC %d\n", dev->id);
2066 
2067 	if (dev->id >= MPSC_NUM_CTLRS)
2068 		return -ENODEV;
2069 
2070 	pi = &mpsc_ports[dev->id];
2071 
2072 	rc = mpsc_drv_map_regs(pi, dev);
2073 	if (rc)
2074 		return rc;
2075 
2076 	mpsc_drv_get_platform_data(pi, dev, dev->id);
2077 	pi->port.dev = &dev->dev;
2078 
2079 	rc = mpsc_make_ready(pi);
2080 	if (rc)
2081 		goto err_unmap;
2082 
2083 	spin_lock_init(&pi->tx_lock);
2084 	rc = uart_add_one_port(&mpsc_reg, &pi->port);
2085 	if (rc)
2086 		goto err_relport;
2087 
2088 	return 0;
2089 err_relport:
2090 	mpsc_release_port(&pi->port);
2091 err_unmap:
2092 	mpsc_drv_unmap_regs(pi);
2093 	return rc;
2094 }
2095 
/*
 * Per-port platform driver.  No .remove is provided; unbinding is
 * prevented instead via suppress_bind_attrs.
 */
static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.driver	= {
		.name			= MPSC_CTLR_NAME,
		.suppress_bind_attrs	= true,
	},
};
2103 
mpsc_drv_init(void)2104 static int __init mpsc_drv_init(void)
2105 {
2106 	int	rc;
2107 
2108 	printk(KERN_INFO "Serial: MPSC driver\n");
2109 
2110 	memset(mpsc_ports, 0, sizeof(mpsc_ports));
2111 	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2112 
2113 	rc = uart_register_driver(&mpsc_reg);
2114 	if (rc)
2115 		return rc;
2116 
2117 	rc = platform_driver_register(&mpsc_shared_driver);
2118 	if (rc)
2119 		goto err_unreg_uart;
2120 
2121 	rc = platform_driver_register(&mpsc_driver);
2122 	if (rc)
2123 		goto err_unreg_plat;
2124 
2125 	return 0;
2126 err_unreg_plat:
2127 	platform_driver_unregister(&mpsc_shared_driver);
2128 err_unreg_uart:
2129 	uart_unregister_driver(&mpsc_reg);
2130 	return rc;
2131 }
2132 device_initcall(mpsc_drv_init);
2133 
2134 /*
2135 MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2136 MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
2137 MODULE_LICENSE("GPL");
2138 */
2139