• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
3  * GT64260, MV64340, MV64360, GT96100, ... ).
4  *
5  * Author: Mark A. Greer <mgreer@mvista.com>
6  *
7  * Based on an old MPSC driver that was in the linuxppc tree.  It appears to
8  * have been created by Chris Zankel (formerly of MontaVista) but there
9  * is no proper Copyright so I'm not sure.  Apparently, parts were also
10  * taken from PPCBoot (now U-Boot).  Also based on drivers/serial/8250.c
11  * by Russell King.
12  *
13  * 2004 (c) MontaVista, Software, Inc.  This file is licensed under
14  * the terms of the GNU General Public License version 2.  This program
15  * is licensed "as is" without any warranty of any kind, whether express
16  * or implied.
17  */
18 /*
19  * The MPSC interface is much like a typical network controller's interface.
20  * That is, you set up separate rings of descriptors for transmitting and
21  * receiving data.  There is also a pool of buffers with (one buffer per
22  * descriptor) that incoming data are dma'd into or outgoing data are dma'd
23  * out of.
24  *
25  * The MPSC requires two other controllers to be able to work.  The Baud Rate
26  * Generator (BRG) provides a clock at programmable frequencies which determines
27  * the baud rate.  The Serial DMA Controller (SDMA) takes incoming data from the
28  * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
29  * MPSC.  It is actually the SDMA interrupt that the driver uses to keep the
30  * transmit and receive "engines" going (i.e., indicate data has been
31  * transmitted or received).
32  *
33  * NOTES:
34  *
35  * 1) Some chips have an erratum where several regs cannot be
36  * read.  To work around that, we keep a local copy of those regs in
37  * 'mpsc_port_info'.
38  *
39  * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
40  * accesses system mem with coherency enabled.  For that reason, the driver
41  * assumes that coherency for that ctlr has been disabled.  This means
42  * that when in a cache coherent system, the driver has to manually manage
43  * the data cache on the areas that it touches because the dma_* macro are
44  * basically no-ops.
45  *
46  * 3) There is an erratum (on PPC) where you can't use the instruction to do
47  * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
48  * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
49  *
50  * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
51  */
52 
53 
54 #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
55 #define SUPPORT_SYSRQ
56 #endif
57 
58 #include <linux/tty.h>
59 #include <linux/tty_flip.h>
60 #include <linux/ioport.h>
61 #include <linux/init.h>
62 #include <linux/console.h>
63 #include <linux/sysrq.h>
64 #include <linux/serial.h>
65 #include <linux/serial_core.h>
66 #include <linux/delay.h>
67 #include <linux/device.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/mv643xx.h>
70 #include <linux/platform_device.h>
71 #include <linux/gfp.h>
72 
73 #include <asm/io.h>
74 #include <asm/irq.h>
75 
76 #define	MPSC_NUM_CTLRS		2
77 
78 /*
79  * Descriptors and buffers must be cache line aligned.
80  * Buffers lengths must be multiple of cache line size.
81  * Number of Tx & Rx descriptors must be powers of 2.
82  */
83 #define	MPSC_RXR_ENTRIES	32
84 #define	MPSC_RXRE_SIZE		dma_get_cache_alignment()
85 #define	MPSC_RXR_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
86 #define	MPSC_RXBE_SIZE		dma_get_cache_alignment()
87 #define	MPSC_RXB_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
88 
89 #define	MPSC_TXR_ENTRIES	32
90 #define	MPSC_TXRE_SIZE		dma_get_cache_alignment()
91 #define	MPSC_TXR_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
92 #define	MPSC_TXBE_SIZE		dma_get_cache_alignment()
93 #define	MPSC_TXB_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
94 
95 #define	MPSC_DMA_ALLOC_SIZE	(MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
96 		+ MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
97 
/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
/*
 * Fields are stored big-endian (written with cpu_to_be*() in
 * mpsc_init_rings()) since that is what the SDMA expects; 'link' and
 * 'buf_ptr' hold physical addresses.
 */
struct mpsc_rx_desc {
	u16 bufsize;	/* Capacity of the buffer at buf_ptr (MPSC_RXBE_SIZE) */
	u16 bytecnt;	/* Bytes the SDMA actually received into the buffer */
	u32 cmdstat;	/* SDMA_DESC_CMDSTAT_* bits; O set => SDMA owns desc */
	u32 link;	/* Phys addr of next descriptor (ring wraps to first) */
	u32 buf_ptr;	/* Phys addr of this entry's data buffer */
} __attribute((packed));
106 
struct mpsc_tx_desc {
	u16 bytecnt;	/* Bytes queued for transmit -- set outside this chunk */
	u16 shadow;	/* presumably a driver-side copy of bytecnt; not
			 * referenced in this part of the file -- confirm */
	u32 cmdstat;	/* SDMA_DESC_CMDSTAT_* bits; O set => SDMA owns desc */
	u32 link;	/* Phys addr of next descriptor (ring wraps to first) */
	u32 buf_ptr;	/* Phys addr of this entry's data buffer */
} __attribute((packed));
114 
/*
 * Some regs that have the erratum that you can't read them are shared
 * between the two MPSC controllers.  This struct contains those shared regs.
 */
struct mpsc_shared_regs {
	phys_addr_t mpsc_routing_base_p;	/* Phys addr of routing regs */
	phys_addr_t sdma_intr_base_p;		/* Phys addr of SDMA intr regs */

	void __iomem *mpsc_routing_base;	/* Mapped MPSC routing regs */
	void __iomem *sdma_intr_base;		/* Mapped SDMA intr regs */

	/* Mirrored values of the unreadable regs (valid when mirror_regs) */
	u32 MPSC_MRR_m;
	u32 MPSC_RCRR_m;
	u32 MPSC_TCRR_m;
	u32 SDMA_INTR_CAUSE_m;
	u32 SDMA_INTR_MASK_m;
};
132 
/* The main driver data structure -- one instance per MPSC ctlr/port */
struct mpsc_port_info {
	struct uart_port port;	/* Overlay uart_port structure */

	/* Internal driver state for this ctlr */
	u8 ready;		/* Set by mpsc_make_ready() once hw+rings init'd */
	u8 rcv_data;		/* NOTE(review): not referenced in this chunk */
	tcflag_t c_iflag;	/* save termios->c_iflag */
	tcflag_t c_cflag;	/* save termios->c_cflag */

	/* Info passed in from platform */
	u8 mirror_regs;		/* Need to mirror regs? (reg-read erratum) */
	u8 cache_mgmt;		/* Need manual cache mgmt? (coherency erratum) */
	u8 brg_can_tune;	/* BRG has baud tuning? */
	u32 brg_clk_src;	/* Clock source field programmed into BRG_BCR */
	u16 mpsc_max_idle;	/* Value written to MPSC_CHR_3 */
	int default_baud;
	int default_bits;
	int default_parity;
	int default_flow;

	/* Physical addresses of various blocks of registers (from platform) */
	phys_addr_t mpsc_base_p;
	phys_addr_t sdma_base_p;
	phys_addr_t brg_base_p;

	/* Virtual addresses of various blocks of registers (from platform) */
	void __iomem *mpsc_base;
	void __iomem *sdma_base;
	void __iomem *brg_base;

	/* Descriptor ring and buffer allocations (one shared DMA region,
	 * partitioned by mpsc_init_rings()) */
	void *dma_region;
	dma_addr_t dma_region_p;

	dma_addr_t rxr;		/* Rx descriptor ring */
	dma_addr_t rxr_p;	/* Phys addr of rxr */
	u8 *rxb;		/* Rx Ring I/O buf */
	u8 *rxb_p;		/* Phys addr of rxb */
	u32 rxr_posn;		/* First desc w/ Rx data */

	dma_addr_t txr;		/* Tx descriptor ring */
	dma_addr_t txr_p;	/* Phys addr of txr */
	u8 *txb;		/* Tx Ring I/O buf */
	u8 *txb_p;		/* Phys addr of txb */
	int txr_head;		/* Where new data goes */
	int txr_tail;		/* Where sent data comes off */
	spinlock_t tx_lock;	/* transmit lock */

	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
	u32 MPSC_MPCR_m;
	u32 MPSC_CHR_1_m;
	u32 MPSC_CHR_2_m;
	u32 MPSC_CHR_10_m;
	u32 BRG_BCR_m;
	struct mpsc_shared_regs *shared_regs;	/* Regs shared w/ other ctlr */
};
190 
191 /* Hooks to platform-specific code */
192 int mpsc_platform_register_driver(void);
193 void mpsc_platform_unregister_driver(void);
194 
195 /* Hooks back in to mpsc common to be called by platform-specific code */
196 struct mpsc_port_info *mpsc_device_probe(int index);
197 struct mpsc_port_info *mpsc_device_remove(int index);
198 
199 /* Main MPSC Configuration Register Offsets */
200 #define	MPSC_MMCRL			0x0000
201 #define	MPSC_MMCRH			0x0004
202 #define	MPSC_MPCR			0x0008
203 #define	MPSC_CHR_1			0x000c
204 #define	MPSC_CHR_2			0x0010
205 #define	MPSC_CHR_3			0x0014
206 #define	MPSC_CHR_4			0x0018
207 #define	MPSC_CHR_5			0x001c
208 #define	MPSC_CHR_6			0x0020
209 #define	MPSC_CHR_7			0x0024
210 #define	MPSC_CHR_8			0x0028
211 #define	MPSC_CHR_9			0x002c
212 #define	MPSC_CHR_10			0x0030
213 #define	MPSC_CHR_11			0x0034
214 
215 #define	MPSC_MPCR_FRZ			(1 << 9)
216 #define	MPSC_MPCR_CL_5			0
217 #define	MPSC_MPCR_CL_6			1
218 #define	MPSC_MPCR_CL_7			2
219 #define	MPSC_MPCR_CL_8			3
220 #define	MPSC_MPCR_SBL_1			0
221 #define	MPSC_MPCR_SBL_2			1
222 
223 #define	MPSC_CHR_2_TEV			(1<<1)
224 #define	MPSC_CHR_2_TA			(1<<7)
225 #define	MPSC_CHR_2_TTCS			(1<<9)
226 #define	MPSC_CHR_2_REV			(1<<17)
227 #define	MPSC_CHR_2_RA			(1<<23)
228 #define	MPSC_CHR_2_CRD			(1<<25)
229 #define	MPSC_CHR_2_EH			(1<<31)
230 #define	MPSC_CHR_2_PAR_ODD		0
231 #define	MPSC_CHR_2_PAR_SPACE		1
232 #define	MPSC_CHR_2_PAR_EVEN		2
233 #define	MPSC_CHR_2_PAR_MARK		3
234 
235 /* MPSC Signal Routing */
236 #define	MPSC_MRR			0x0000
237 #define	MPSC_RCRR			0x0004
238 #define	MPSC_TCRR			0x0008
239 
240 /* Serial DMA Controller Interface Registers */
241 #define	SDMA_SDC			0x0000
242 #define	SDMA_SDCM			0x0008
243 #define	SDMA_RX_DESC			0x0800
244 #define	SDMA_RX_BUF_PTR			0x0808
245 #define	SDMA_SCRDP			0x0810
246 #define	SDMA_TX_DESC			0x0c00
247 #define	SDMA_SCTDP			0x0c10
248 #define	SDMA_SFTDP			0x0c14
249 
250 #define	SDMA_DESC_CMDSTAT_PE		(1<<0)
251 #define	SDMA_DESC_CMDSTAT_CDL		(1<<1)
252 #define	SDMA_DESC_CMDSTAT_FR		(1<<3)
253 #define	SDMA_DESC_CMDSTAT_OR		(1<<6)
254 #define	SDMA_DESC_CMDSTAT_BR		(1<<9)
255 #define	SDMA_DESC_CMDSTAT_MI		(1<<10)
256 #define	SDMA_DESC_CMDSTAT_A		(1<<11)
257 #define	SDMA_DESC_CMDSTAT_AM		(1<<12)
258 #define	SDMA_DESC_CMDSTAT_CT		(1<<13)
259 #define	SDMA_DESC_CMDSTAT_C		(1<<14)
260 #define	SDMA_DESC_CMDSTAT_ES		(1<<15)
261 #define	SDMA_DESC_CMDSTAT_L		(1<<16)
262 #define	SDMA_DESC_CMDSTAT_F		(1<<17)
263 #define	SDMA_DESC_CMDSTAT_P		(1<<18)
264 #define	SDMA_DESC_CMDSTAT_EI		(1<<23)
265 #define	SDMA_DESC_CMDSTAT_O		(1<<31)
266 
267 #define SDMA_DESC_DFLT			(SDMA_DESC_CMDSTAT_O \
268 		| SDMA_DESC_CMDSTAT_EI)
269 
270 #define	SDMA_SDC_RFT			(1<<0)
271 #define	SDMA_SDC_SFM			(1<<1)
272 #define	SDMA_SDC_BLMR			(1<<6)
273 #define	SDMA_SDC_BLMT			(1<<7)
274 #define	SDMA_SDC_POVR			(1<<8)
275 #define	SDMA_SDC_RIFB			(1<<9)
276 
277 #define	SDMA_SDCM_ERD			(1<<7)
278 #define	SDMA_SDCM_AR			(1<<15)
279 #define	SDMA_SDCM_STD			(1<<16)
280 #define	SDMA_SDCM_TXD			(1<<23)
281 #define	SDMA_SDCM_AT			(1<<31)
282 
283 #define	SDMA_0_CAUSE_RXBUF		(1<<0)
284 #define	SDMA_0_CAUSE_RXERR		(1<<1)
285 #define	SDMA_0_CAUSE_TXBUF		(1<<2)
286 #define	SDMA_0_CAUSE_TXEND		(1<<3)
287 #define	SDMA_1_CAUSE_RXBUF		(1<<8)
288 #define	SDMA_1_CAUSE_RXERR		(1<<9)
289 #define	SDMA_1_CAUSE_TXBUF		(1<<10)
290 #define	SDMA_1_CAUSE_TXEND		(1<<11)
291 
292 #define	SDMA_CAUSE_RX_MASK	(SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
293 		| SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
294 #define	SDMA_CAUSE_TX_MASK	(SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
295 		| SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
296 
297 /* SDMA Interrupt registers */
298 #define	SDMA_INTR_CAUSE			0x0000
299 #define	SDMA_INTR_MASK			0x0080
300 
301 /* Baud Rate Generator Interface Registers */
302 #define	BRG_BCR				0x0000
303 #define	BRG_BTR				0x0004
304 
305 /*
306  * Define how this driver is known to the outside (we've been assigned a
307  * range on the "Low-density serial ports" major).
308  */
309 #define MPSC_MAJOR			204
310 #define MPSC_MINOR_START		44
311 #define	MPSC_DRIVER_NAME		"MPSC"
312 #define	MPSC_DEV_NAME			"ttyMM"
313 #define	MPSC_VERSION			"1.00"
314 
315 static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
316 static struct mpsc_shared_regs mpsc_shared_regs;
317 static struct uart_driver mpsc_reg;
318 
319 static void mpsc_start_rx(struct mpsc_port_info *pi);
320 static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
321 static void mpsc_release_port(struct uart_port *port);
322 /*
323  ******************************************************************************
324  *
325  * Baud Rate Generator Routines (BRG)
326  *
327  ******************************************************************************
328  */
/*
 * Program the BRG clock source field (bits 21:18 of BRG_BCR) and, when the
 * BRG supports baud tuning, clear the tuning bit (25).  The low half of
 * BRG_BTR is zeroed as well.  Reads go through the mirrored BRG_BCR copy
 * when the read erratum applies.
 */
static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
{
	u32	bcr;

	if (pi->mirror_regs)
		bcr = pi->BRG_BCR_m;
	else
		bcr = readl(pi->brg_base + BRG_BCR);

	bcr &= ~(0xf << 18);
	bcr |= (clk_src & 0xf) << 18;

	if (pi->brg_can_tune)
		bcr &= ~(1 << 25);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = bcr;
	writel(bcr, pi->brg_base + BRG_BCR);

	writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
			pi->brg_base + BRG_BTR);
}
346 
mpsc_brg_enable(struct mpsc_port_info * pi)347 static void mpsc_brg_enable(struct mpsc_port_info *pi)
348 {
349 	u32	v;
350 
351 	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
352 	v |= (1 << 16);
353 
354 	if (pi->mirror_regs)
355 		pi->BRG_BCR_m = v;
356 	writel(v, pi->brg_base + BRG_BCR);
357 }
358 
mpsc_brg_disable(struct mpsc_port_info * pi)359 static void mpsc_brg_disable(struct mpsc_port_info *pi)
360 {
361 	u32	v;
362 
363 	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
364 	v &= ~(1 << 16);
365 
366 	if (pi->mirror_regs)
367 		pi->BRG_BCR_m = v;
368 	writel(v, pi->brg_base + BRG_BCR);
369 }
370 
371 /*
372  * To set the baud, we adjust the CDV field in the BRG_BCR reg.
373  * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
374  * However, the input clock is divided by 16 in the MPSC b/c of how
375  * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
376  * calculation by 16 to account for that.  So the real calculation
377  * that accounts for the way the mpsc is set up is:
378  * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
379  */
/*
 * Load the CDV divisor (low 16 bits of BRG_BCR) for the requested baud
 * rate, per the formula in the comment above.  The BRG is disabled around
 * the update and re-enabled afterwards.
 */
static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
{
	u32	bcr;
	u32	cdv = (pi->port.uartclk / (baud << 5)) - 1;

	mpsc_brg_disable(pi);

	bcr = pi->mirror_regs ? pi->BRG_BCR_m
		: readl(pi->brg_base + BRG_BCR);
	bcr = (bcr & 0xffff0000) | (cdv & 0xffff);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = bcr;
	writel(bcr, pi->brg_base + BRG_BCR);

	mpsc_brg_enable(pi);
}
394 
395 /*
396  ******************************************************************************
397  *
398  * Serial DMA Routines (SDMA)
399  *
400  ******************************************************************************
401  */
402 
/*
 * Program the SDMA burst size field (bits 13:12 of SDMA_SDC).  burst_size
 * is given in bytes; the register encodes it as a count of 64-bit words.
 */
static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
{
	u32	v;

	pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
			pi->port.line, burst_size);

	burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */

	if (burst_size < 2)
		v = 0x0;	/* 1 64-bit word */
	else if (burst_size < 4)
		v = 0x1;	/* 2 64-bit words */
	else if (burst_size < 8)
		v = 0x2;	/* 4 64-bit words */
	else
		v = 0x3;	/* 8 64-bit words */

	/*
	 * BUGFIX: the previous mask, (0x3 << 12), kept ONLY the old burst
	 * field and cleared every other SDMA_SDC bit -- including the mode
	 * bits mpsc_sdma_init() had just written.  A read-modify-write of
	 * the burst field must clear those two bits and preserve the rest.
	 */
	writel((readl(pi->sdma_base + SDMA_SDC) & ~(0x3 << 12)) | (v << 12),
		pi->sdma_base + SDMA_SDC);
}
424 
/* Set the SDMA config reg's low mode bits and program the burst size. */
static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
{
	u32	sdc;

	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
		burst_size);

	sdc = readl(pi->sdma_base + SDMA_SDC);
	writel((sdc & 0x3ff) | 0x03f, pi->sdma_base + SDMA_SDC);

	mpsc_sdma_burstsize(pi, burst_size);
}
434 
/*
 * Clear this port's bits (low nibble for ctlr 0, bits 11:8 for ctlr 1) in
 * the shared SDMA interrupt mask register.  Returns the nibble that was
 * previously set for this port so the caller can restore it later.
 */
static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
{
	u32	prev, cur;

	pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);

	if (pi->mirror_regs)
		prev = pi->shared_regs->SDMA_INTR_MASK_m;
	else
		prev = readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
	cur = prev;

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;		/* ctlr 1's bits sit 8 positions up */
	cur &= ~mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = cur;
	writel(cur, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	if (pi->port.line)
		prev >>= 8;
	return prev & 0xf;
}
457 
/*
 * Set this port's bits (low nibble for ctlr 0, bits 11:8 for ctlr 1) in
 * the shared SDMA interrupt mask register, re-enabling those interrupts.
 */
static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
{
	u32	cur;

	pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);

	if (pi->mirror_regs)
		cur = pi->shared_regs->SDMA_INTR_MASK_m;
	else
		cur = readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;		/* ctlr 1's bits sit 8 positions up */
	cur |= mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = cur;
	writel(cur, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
}
476 
mpsc_sdma_intr_ack(struct mpsc_port_info * pi)477 static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
478 {
479 	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
480 
481 	if (pi->mirror_regs)
482 		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
483 	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
484 			+ pi->port.line);
485 }
486 
mpsc_sdma_set_rx_ring(struct mpsc_port_info * pi,struct mpsc_rx_desc * rxre_p)487 static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
488 		struct mpsc_rx_desc *rxre_p)
489 {
490 	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
491 		pi->port.line, (u32)rxre_p);
492 
493 	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
494 }
495 
mpsc_sdma_set_tx_ring(struct mpsc_port_info * pi,struct mpsc_tx_desc * txre_p)496 static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
497 		struct mpsc_tx_desc *txre_p)
498 {
499 	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
500 	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
501 }
502 
/*
 * Issue an SDMA command: OR 'val' into SDMA_SDCM, or clear the whole reg
 * when val is 0.  Write barriers bracket the write so the command is
 * ordered against surrounding descriptor updates.
 */
static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
{
	u32	sdcm = readl(pi->sdma_base + SDMA_SDCM);

	if (val)
		sdcm |= val;
	else
		sdcm = 0;

	wmb();
	writel(sdcm, pi->sdma_base + SDMA_SDCM);
	wmb();
}
516 
mpsc_sdma_tx_active(struct mpsc_port_info * pi)517 static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
518 {
519 	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
520 }
521 
/*
 * Kick the tx SDMA engine if it is currently idle and the tail descriptor
 * is owned by the SDMA (i.e. has queued data ready to go).
 */
static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));
		/* Sync/invalidate so we read the SDMA's view of the desc
		 * (manual cache mgmt because coherency is off -- note 2) */
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* Owner bit set => descriptor handed to the SDMA, start it */
		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}
547 
mpsc_sdma_stop(struct mpsc_port_info * pi)548 static void mpsc_sdma_stop(struct mpsc_port_info *pi)
549 {
550 	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
551 
552 	/* Abort any SDMA transfers */
553 	mpsc_sdma_cmd(pi, 0);
554 	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
555 
556 	/* Clear the SDMA current and first TX and RX pointers */
557 	mpsc_sdma_set_tx_ring(pi, NULL);
558 	mpsc_sdma_set_rx_ring(pi, NULL);
559 
560 	/* Disable interrupts */
561 	mpsc_sdma_intr_mask(pi, 0xf);
562 	mpsc_sdma_intr_ack(pi);
563 }
564 
565 /*
566  ******************************************************************************
567  *
568  * Multi-Protocol Serial Controller Routines (MPSC)
569  *
570  ******************************************************************************
571  */
572 
/*
 * Program the shared clock routing regs and this port's mode/character
 * regs.  The mirrored branch exists because several of these regs can't
 * be read on errata'd chips (see note 1 at the top of the file); register
 * write order is preserved from the original bring-up sequence.
 */
static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32	v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);

	/* No preamble, 16x divider, low-latency */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
	mpsc_set_baudrate(pi, pi->default_baud);

	/* Clear the channel regs (CHR_3 gets the max-idle value instead),
	 * keeping the CHR_1/CHR_2 mirrors in sync */
	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}
631 
mpsc_enter_hunt(struct mpsc_port_info * pi)632 static void mpsc_enter_hunt(struct mpsc_port_info *pi)
633 {
634 	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
635 
636 	if (pi->mirror_regs) {
637 		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
638 			pi->mpsc_base + MPSC_CHR_2);
639 		/* Erratum prevents reading CHR_2 so just delay for a while */
640 		udelay(100);
641 	} else {
642 		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
643 				pi->mpsc_base + MPSC_CHR_2);
644 
645 		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
646 			udelay(10);
647 	}
648 }
649 
mpsc_freeze(struct mpsc_port_info * pi)650 static void mpsc_freeze(struct mpsc_port_info *pi)
651 {
652 	u32	v;
653 
654 	pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
655 
656 	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
657 		readl(pi->mpsc_base + MPSC_MPCR);
658 	v |= MPSC_MPCR_FRZ;
659 
660 	if (pi->mirror_regs)
661 		pi->MPSC_MPCR_m = v;
662 	writel(v, pi->mpsc_base + MPSC_MPCR);
663 }
664 
mpsc_unfreeze(struct mpsc_port_info * pi)665 static void mpsc_unfreeze(struct mpsc_port_info *pi)
666 {
667 	u32	v;
668 
669 	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
670 		readl(pi->mpsc_base + MPSC_MPCR);
671 	v &= ~MPSC_MPCR_FRZ;
672 
673 	if (pi->mirror_regs)
674 		pi->MPSC_MPCR_m = v;
675 	writel(v, pi->mpsc_base + MPSC_MPCR);
676 
677 	pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
678 }
679 
/*
 * Program the character length field (bits 13:12 of MPSC_MPCR) with one of
 * the MPSC_MPCR_CL_* values, keeping the mirror in sync.
 */
static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
{
	u32	mpcr;

	pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);

	mpcr = pi->mirror_regs ? pi->MPSC_MPCR_m
		: readl(pi->mpsc_base + MPSC_MPCR);

	mpcr &= ~(0x3 << 12);
	mpcr |= (len & 0x3) << 12;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = mpcr;
	writel(mpcr, pi->mpsc_base + MPSC_MPCR);
}
694 
/*
 * Program the stop-bit length bit (14) of MPSC_MPCR with one of the
 * MPSC_MPCR_SBL_* values, keeping the mirror in sync.
 */
static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
{
	u32	mpcr;

	pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
		pi->port.line, len);

	mpcr = pi->mirror_regs ? pi->MPSC_MPCR_m
		: readl(pi->mpsc_base + MPSC_MPCR);

	mpcr &= ~(1 << 14);
	mpcr |= (len & 0x1) << 14;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = mpcr;
	writel(mpcr, pi->mpsc_base + MPSC_MPCR);
}
711 
/*
 * Program the parity mode (one of MPSC_CHR_2_PAR_*) into both the tx
 * (bits 19:18) and rx (bits 3:2) fields of MPSC_CHR_2, keeping the
 * mirror in sync.
 */
static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
{
	u32	chr2;

	pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);

	chr2 = pi->mirror_regs ? pi->MPSC_CHR_2_m
		: readl(pi->mpsc_base + MPSC_CHR_2);

	p &= 0x3;
	chr2 &= ~0xc000c;
	chr2 |= (p << 18) | (p << 2);

	if (pi->mirror_regs)
		pi->MPSC_CHR_2_m = chr2;
	writel(chr2, pi->mpsc_base + MPSC_CHR_2);
}
728 
729 /*
730  ******************************************************************************
731  *
732  * Driver Init Routines
733  *
734  ******************************************************************************
735  */
736 
/*
 * Full hardware bring-up for one port: program and enable the BRG, set up
 * the SDMA (bursting one cacheline at a time), make sure the SDMA engines
 * are stopped, then configure the MPSC itself.  Call order preserved from
 * the original bring-up sequence.
 */
static void mpsc_init_hw(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);

	mpsc_brg_init(pi, pi->brg_clk_src);
	mpsc_brg_enable(pi);
	mpsc_sdma_init(pi, dma_get_cache_alignment());	/* burst a cacheline */
	mpsc_sdma_stop(pi);
	mpsc_hw_init(pi);
}
747 
mpsc_alloc_ring_mem(struct mpsc_port_info * pi)748 static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
749 {
750 	int rc = 0;
751 
752 	pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
753 		pi->port.line);
754 
755 	if (!pi->dma_region) {
756 		if (!dma_set_mask(pi->port.dev, 0xffffffff)) {
757 			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
758 			rc = -ENXIO;
759 		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
760 						MPSC_DMA_ALLOC_SIZE,
761 						&pi->dma_region_p, GFP_KERNEL))
762 				== NULL) {
763 			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
764 			rc = -ENOMEM;
765 		}
766 	}
767 
768 	return rc;
769 }
770 
mpsc_free_ring_mem(struct mpsc_port_info * pi)771 static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
772 {
773 	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
774 
775 	if (pi->dma_region) {
776 		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
777 				pi->dma_region, pi->dma_region_p);
778 		pi->dma_region = NULL;
779 		pi->dma_region_p = (dma_addr_t)NULL;
780 	}
781 }
782 
/*
 * Carve the pre-allocated DMA region into the rx descriptor ring, rx
 * buffers, tx descriptor ring, and tx buffers; chain each ring's
 * descriptors into a circle; and hand ownership of every rx entry to the
 * SDMA.  Descriptor fields are written big-endian, with physical
 * addresses in 'link'/'buf_ptr', as the SDMA expects.
 */
static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	dma_addr_t dp, dp_p;	/* virtual / physical cursors into region */
	u8 *bp, *bp_p;		/* virtual / physical buffer cursors */
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors: owner (O) handed to SDMA, intr-enable
	 * (EI), plus the F and L flag bits */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p);	/* Wrap last back to first */

	/* Init tx ring descriptors (cmdstat/bytecnt are presumably filled
	 * in when data is queued -- that code is outside this chunk) */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */

	/* Push the descriptor writes out to memory for the SDMA to see
	 * (manual cache mgmt because coherency is disabled -- note 2) */
	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)pi->dma_region,
					(ulong)pi->dma_region
					+ MPSC_DMA_ALLOC_SIZE);
#endif

	return;
}
885 
mpsc_uninit_rings(struct mpsc_port_info * pi)886 static void mpsc_uninit_rings(struct mpsc_port_info *pi)
887 {
888 	pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
889 
890 	BUG_ON(pi->dma_region == NULL);
891 
892 	pi->rxr = 0;
893 	pi->rxr_p = 0;
894 	pi->rxb = NULL;
895 	pi->rxb_p = NULL;
896 	pi->rxr_posn = 0;
897 
898 	pi->txr = 0;
899 	pi->txr_p = 0;
900 	pi->txb = NULL;
901 	pi->txb_p = NULL;
902 	pi->txr_head = 0;
903 	pi->txr_tail = 0;
904 }
905 
mpsc_make_ready(struct mpsc_port_info * pi)906 static int mpsc_make_ready(struct mpsc_port_info *pi)
907 {
908 	int rc;
909 
910 	pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
911 
912 	if (!pi->ready) {
913 		mpsc_init_hw(pi);
914 		rc = mpsc_alloc_ring_mem(pi);
915 		if (rc)
916 			return rc;
917 		mpsc_init_rings(pi);
918 		pi->ready = 1;
919 	}
920 
921 	return 0;
922 }
923 
924 #ifdef CONFIG_CONSOLE_POLL
925 static int serial_polled;
926 #endif
927 
928 /*
929  ******************************************************************************
930  *
931  * Interrupt Handling Routines
932  *
933  ******************************************************************************
934  */
935 
/*
 * Handle a receive interrupt: walk the rx descriptor ring, push completed
 * frames into the tty flip buffer, recycle each descriptor, then restart
 * the rx engine if it stopped.  Called with pi->port.lock held; *flags are
 * the caller's saved IRQ flags, needed because the lock is dropped around
 * tty_flip_buffer_push().  Returns 1 if any descriptor was processed,
 * 0 otherwise.
 */
static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
{
	struct mpsc_rx_desc *rxre;
	struct tty_port *port = &pi->port.state->port;
	u32	cmdstat, bytes_in, i;
	int	rc = 0;
	u8	*bp;
	char	flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));

	/* Make the descriptor visible to the CPU before reading cmdstat. */
	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed.
	 * A clear SDMA_DESC_CMDSTAT_O (ownership) bit means the SDMA has
	 * handed the descriptor back to the CPU.
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
				& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);
#ifdef CONFIG_CONSOLE_POLL
		/* Poll-mode path owns the ring right now; back off. */
		if (unlikely(serial_polled)) {
			serial_polled = 0;
			return 0;
		}
#endif
		/* Following use of tty struct directly is deprecated */
		if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
			if (port->low_latency) {
				/* Push now; lock must be dropped for that. */
				spin_unlock_irqrestore(&pi->port.lock, *flags);
				tty_flip_buffer_push(port);
				spin_lock_irqsave(&pi->port.lock, *flags);
			}
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors.  For parity error, it is the last byte in
		 * the buffer that had the error.  As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			/* Only report conditions the caller asked to see. */
			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			bp++;
			bytes_in--;
#ifdef CONFIG_CONSOLE_POLL
			if (unlikely(serial_polled)) {
				serial_polled = 0;
				return 0;
			}
#endif
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			/* Error frame: only the first byte is reported. */
			tty_insert_flip_char(port, *bp, flag);
		} else {
			for (i=0; i<bytes_in; i++)
				tty_insert_flip_char(port, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		/* Recycle the descriptor: zero count, give it back to SDMA. */
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if its stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	spin_unlock_irqrestore(&pi->port.lock, *flags);
	tty_flip_buffer_push(port);
	spin_lock_irqsave(&pi->port.lock, *flags);
	return rc;
}
1090 
/*
 * Prepare the tx descriptor at txr_head for transmission of 'count' bytes
 * and hand it to the SDMA.  'intr' selects whether this descriptor raises
 * an interrupt on completion (SDMA_DESC_CMDSTAT_EI).  The wmb()s order the
 * byte count/shadow writes before the ownership flip, and the ownership
 * flip before the cache flush, so the SDMA never sees a half-written
 * descriptor.
 */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb();			/* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}
1113 
/*
 * Move pending tx data (an x_char first, then the uart circ buffer) into
 * the tx ring buffers, one MPSC_TXBE_SIZE chunk per descriptor, until the
 * ring is full or there is nothing left to send.  Caller is expected to
 * hold pi->tx_lock (all call sites in this file do).
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			/* Copy at most one buffer, without wrapping xmit. */
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
				UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		/* Push the buffer out to memory before arming the desc. */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}
1165 
/*
 * Handle a transmit interrupt: reclaim completed tx descriptors (ownership
 * bit cleared by the SDMA), account the transmitted bytes, then queue more
 * data and restart the engine.  Takes pi->tx_lock itself.  Returns 1 if
 * any descriptor completed, 0 otherwise.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	/* Only reclaim while the SDMA tx engine is idle. */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}
1213 
1214 /*
1215  * This is the driver's interrupt handler.  To avoid a race, we first clear
1216  * the interrupt, then handle any completed Rx/Tx descriptors.  When done
1217  * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1218  */
mpsc_sdma_intr(int irq,void * dev_id)1219 static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
1220 {
1221 	struct mpsc_port_info *pi = dev_id;
1222 	ulong iflags;
1223 	int rc = IRQ_NONE;
1224 
1225 	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
1226 
1227 	spin_lock_irqsave(&pi->port.lock, iflags);
1228 	mpsc_sdma_intr_ack(pi);
1229 	if (mpsc_rx_intr(pi, &iflags))
1230 		rc = IRQ_HANDLED;
1231 	if (mpsc_tx_intr(pi))
1232 		rc = IRQ_HANDLED;
1233 	spin_unlock_irqrestore(&pi->port.lock, iflags);
1234 
1235 	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
1236 	return rc;
1237 }
1238 
1239 /*
1240  ******************************************************************************
1241  *
1242  * serial_core.c Interface routines
1243  *
1244  ******************************************************************************
1245  */
mpsc_tx_empty(struct uart_port * port)1246 static uint mpsc_tx_empty(struct uart_port *port)
1247 {
1248 	struct mpsc_port_info *pi =
1249 		container_of(port, struct mpsc_port_info, port);
1250 	ulong iflags;
1251 	uint rc;
1252 
1253 	spin_lock_irqsave(&pi->port.lock, iflags);
1254 	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1255 	spin_unlock_irqrestore(&pi->port.lock, iflags);
1256 
1257 	return rc;
1258 }
1259 
/* Intentionally empty: no way to set modem control lines AFAICT. */
static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}
1264 
mpsc_get_mctrl(struct uart_port * port)1265 static uint mpsc_get_mctrl(struct uart_port *port)
1266 {
1267 	struct mpsc_port_info *pi =
1268 		container_of(port, struct mpsc_port_info, port);
1269 	u32 mflags, status;
1270 
1271 	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
1272 		: readl(pi->mpsc_base + MPSC_CHR_10);
1273 
1274 	mflags = 0;
1275 	if (status & 0x1)
1276 		mflags |= TIOCM_CTS;
1277 	if (status & 0x2)
1278 		mflags |= TIOCM_CAR;
1279 
1280 	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
1281 }
1282 
mpsc_stop_tx(struct uart_port * port)1283 static void mpsc_stop_tx(struct uart_port *port)
1284 {
1285 	struct mpsc_port_info *pi =
1286 		container_of(port, struct mpsc_port_info, port);
1287 
1288 	pr_debug("mpsc_stop_tx[%d]\n", port->line);
1289 
1290 	mpsc_freeze(pi);
1291 }
1292 
mpsc_start_tx(struct uart_port * port)1293 static void mpsc_start_tx(struct uart_port *port)
1294 {
1295 	struct mpsc_port_info *pi =
1296 		container_of(port, struct mpsc_port_info, port);
1297 	unsigned long iflags;
1298 
1299 	spin_lock_irqsave(&pi->tx_lock, iflags);
1300 
1301 	mpsc_unfreeze(pi);
1302 	mpsc_copy_tx_data(pi);
1303 	mpsc_sdma_start_tx(pi);
1304 
1305 	spin_unlock_irqrestore(&pi->tx_lock, iflags);
1306 
1307 	pr_debug("mpsc_start_tx[%d]\n", port->line);
1308 }
1309 
mpsc_start_rx(struct mpsc_port_info * pi)1310 static void mpsc_start_rx(struct mpsc_port_info *pi)
1311 {
1312 	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1313 
1314 	if (pi->rcv_data) {
1315 		mpsc_enter_hunt(pi);
1316 		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
1317 	}
1318 }
1319 
/*
 * Stop the receiver: request a receiver abort via MPSC_CHR_2_RA, wait for
 * the abort to complete, then abort the SDMA rx side.  On chips with the
 * register-read erratum (mirror_regs) CHR_2 cannot be polled, so a fixed
 * delay is used instead of waiting for the RA bit to clear.
 */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);

		/* Hardware clears RA when the abort completes. */
		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}
1342 
mpsc_break_ctl(struct uart_port * port,int ctl)1343 static void mpsc_break_ctl(struct uart_port *port, int ctl)
1344 {
1345 	struct mpsc_port_info *pi =
1346 		container_of(port, struct mpsc_port_info, port);
1347 	ulong	flags;
1348 	u32	v;
1349 
1350 	v = ctl ? 0x00ff0000 : 0;
1351 
1352 	spin_lock_irqsave(&pi->port.lock, flags);
1353 	if (pi->mirror_regs)
1354 		pi->MPSC_CHR_1_m = v;
1355 	writel(v, pi->mpsc_base + MPSC_CHR_1);
1356 	spin_unlock_irqrestore(&pi->port.lock, flags);
1357 }
1358 
mpsc_startup(struct uart_port * port)1359 static int mpsc_startup(struct uart_port *port)
1360 {
1361 	struct mpsc_port_info *pi =
1362 		container_of(port, struct mpsc_port_info, port);
1363 	u32 flag = 0;
1364 	int rc;
1365 
1366 	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1367 		port->line, pi->port.irq);
1368 
1369 	if ((rc = mpsc_make_ready(pi)) == 0) {
1370 		/* Setup IRQ handler */
1371 		mpsc_sdma_intr_ack(pi);
1372 
1373 		/* If irq's are shared, need to set flag */
1374 		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
1375 			flag = IRQF_SHARED;
1376 
1377 		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
1378 					"mpsc-sdma", pi))
1379 			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
1380 					pi->port.irq);
1381 
1382 		mpsc_sdma_intr_unmask(pi, 0xf);
1383 		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
1384 					+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
1385 	}
1386 
1387 	return rc;
1388 }
1389 
mpsc_shutdown(struct uart_port * port)1390 static void mpsc_shutdown(struct uart_port *port)
1391 {
1392 	struct mpsc_port_info *pi =
1393 		container_of(port, struct mpsc_port_info, port);
1394 
1395 	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
1396 
1397 	mpsc_sdma_stop(pi);
1398 	free_irq(pi->port.irq, pi);
1399 }
1400 
mpsc_set_termios(struct uart_port * port,struct ktermios * termios,struct ktermios * old)1401 static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
1402 		 struct ktermios *old)
1403 {
1404 	struct mpsc_port_info *pi =
1405 		container_of(port, struct mpsc_port_info, port);
1406 	u32 baud;
1407 	ulong flags;
1408 	u32 chr_bits, stop_bits, par;
1409 
1410 	pi->c_iflag = termios->c_iflag;
1411 	pi->c_cflag = termios->c_cflag;
1412 
1413 	switch (termios->c_cflag & CSIZE) {
1414 	case CS5:
1415 		chr_bits = MPSC_MPCR_CL_5;
1416 		break;
1417 	case CS6:
1418 		chr_bits = MPSC_MPCR_CL_6;
1419 		break;
1420 	case CS7:
1421 		chr_bits = MPSC_MPCR_CL_7;
1422 		break;
1423 	case CS8:
1424 	default:
1425 		chr_bits = MPSC_MPCR_CL_8;
1426 		break;
1427 	}
1428 
1429 	if (termios->c_cflag & CSTOPB)
1430 		stop_bits = MPSC_MPCR_SBL_2;
1431 	else
1432 		stop_bits = MPSC_MPCR_SBL_1;
1433 
1434 	par = MPSC_CHR_2_PAR_EVEN;
1435 	if (termios->c_cflag & PARENB)
1436 		if (termios->c_cflag & PARODD)
1437 			par = MPSC_CHR_2_PAR_ODD;
1438 #ifdef	CMSPAR
1439 		if (termios->c_cflag & CMSPAR) {
1440 			if (termios->c_cflag & PARODD)
1441 				par = MPSC_CHR_2_PAR_MARK;
1442 			else
1443 				par = MPSC_CHR_2_PAR_SPACE;
1444 		}
1445 #endif
1446 
1447 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
1448 
1449 	spin_lock_irqsave(&pi->port.lock, flags);
1450 
1451 	uart_update_timeout(port, termios->c_cflag, baud);
1452 
1453 	mpsc_set_char_length(pi, chr_bits);
1454 	mpsc_set_stop_bit_length(pi, stop_bits);
1455 	mpsc_set_parity(pi, par);
1456 	mpsc_set_baudrate(pi, baud);
1457 
1458 	/* Characters/events to read */
1459 	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;
1460 
1461 	if (termios->c_iflag & INPCK)
1462 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
1463 			| SDMA_DESC_CMDSTAT_FR;
1464 
1465 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1466 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
1467 
1468 	/* Characters/events to ignore */
1469 	pi->port.ignore_status_mask = 0;
1470 
1471 	if (termios->c_iflag & IGNPAR)
1472 		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
1473 			| SDMA_DESC_CMDSTAT_FR;
1474 
1475 	if (termios->c_iflag & IGNBRK) {
1476 		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;
1477 
1478 		if (termios->c_iflag & IGNPAR)
1479 			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
1480 	}
1481 
1482 	if ((termios->c_cflag & CREAD)) {
1483 		if (!pi->rcv_data) {
1484 			pi->rcv_data = 1;
1485 			mpsc_start_rx(pi);
1486 		}
1487 	} else if (pi->rcv_data) {
1488 		mpsc_stop_rx(port);
1489 		pi->rcv_data = 0;
1490 	}
1491 
1492 	spin_unlock_irqrestore(&pi->port.lock, flags);
1493 }
1494 
mpsc_type(struct uart_port * port)1495 static const char *mpsc_type(struct uart_port *port)
1496 {
1497 	pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
1498 	return MPSC_DRIVER_NAME;
1499 }
1500 
/*
 * serial_core request_port hook.  Register regions are claimed at probe
 * time, so there is nothing to acquire here; always succeeds.
 */
static int mpsc_request_port(struct uart_port *port)
{
	return 0;	/* Should make chip/platform specific call */
}
1506 
mpsc_release_port(struct uart_port * port)1507 static void mpsc_release_port(struct uart_port *port)
1508 {
1509 	struct mpsc_port_info *pi =
1510 		container_of(port, struct mpsc_port_info, port);
1511 
1512 	if (pi->ready) {
1513 		mpsc_uninit_rings(pi);
1514 		mpsc_free_ring_mem(pi);
1515 		pi->ready = 0;
1516 	}
1517 }
1518 
/* Intentionally empty: the driver performs no port autoconfiguration. */
static void mpsc_config_port(struct uart_port *port, int flags)
{
}
1522 
mpsc_verify_port(struct uart_port * port,struct serial_struct * ser)1523 static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1524 {
1525 	struct mpsc_port_info *pi =
1526 		container_of(port, struct mpsc_port_info, port);
1527 	int rc = 0;
1528 
1529 	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1530 
1531 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1532 		rc = -EINVAL;
1533 	else if (pi->port.irq != ser->irq)
1534 		rc = -EINVAL;
1535 	else if (ser->io_type != SERIAL_IO_MEM)
1536 		rc = -EINVAL;
1537 	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1538 		rc = -EINVAL;
1539 	else if ((void *)pi->port.mapbase != ser->iomem_base)
1540 		rc = -EINVAL;
1541 	else if (pi->port.iobase != ser->port)
1542 		rc = -EINVAL;
1543 	else if (ser->hub6 != 0)
1544 		rc = -EINVAL;
1545 
1546 	return rc;
1547 }
#ifdef CONFIG_CONSOLE_POLL
/* Serial polling routines for writing and reading from the uart while
 * in an interrupt or debug context.
 */

/* Bytes drained from the rx ring but not yet consumed by the poller. */
static char poll_buf[2048];
static int poll_ptr;	/* next byte in poll_buf to hand out */
static int poll_cnt;	/* bytes remaining in poll_buf */
static void mpsc_put_poll_char(struct uart_port *port,
							   unsigned char c);
1558 
/*
 * Poll-mode character read for kgdb/kdb: drain completed rx descriptors
 * into poll_buf and return one byte at a time.  Buffered bytes are served
 * first; otherwise this busy-waits on the rx ring until data arrives.
 * Setting serial_polled makes the normal rx ISR back off the ring.
 *
 * NOTE(review): bytes copied into poll_buf are not bounded against
 * sizeof(poll_buf); this appears to rely on bytes_in never exceeding the
 * 2048-byte buffer — verify against the rx buffer entry size.
 */
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	struct mpsc_rx_desc *rxre;
	u32	cmdstat, bytes_in, i;
	u8	*bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	/* Serve from the local buffer first, if anything is left. */
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
		       (pi->rxr_posn*MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
			       MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
			(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
		       !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
			 SDMA_DESC_CMDSTAT_O)){
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *) bp,
				       MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif
			/* Error frames contribute one byte; good frames all. */
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
			 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
				!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			/* Recycle the descriptor back to the SDMA. */
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
						    SDMA_DESC_CMDSTAT_EI |
						    SDMA_DESC_CMDSTAT_F |
						    SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
				       MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
					   (ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
				       (pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
				       MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if its stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}
1657 
1658 
/*
 * Poll-mode character write for kgdb/kdb: place the byte in CHR_1, set the
 * TCS (transmit-character) bit in CHR_2, and busy-wait until the hardware
 * clears TCS to signal the character has gone out.  No locking — poll mode
 * runs with the rest of the system stopped.
 */
static void mpsc_put_poll_char(struct uart_port *port,
			 unsigned char c)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 data;

	/* NOTE(review): this read result is discarded; presumably only the
	 * read side effect (or flushing posted writes) matters — confirm. */
	data = readl(pi->mpsc_base + MPSC_MPCR);
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
}
1676 #endif
1677 
/* serial_core operations table wiring this driver into the uart layer. */
static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char = mpsc_get_poll_char,
	.poll_put_char = mpsc_put_poll_char,
#endif
};
1699 
1700 /*
1701  ******************************************************************************
1702  *
1703  * Console Interface Routines
1704  *
1705  ******************************************************************************
1706  */
1707 
1708 #ifdef CONFIG_SERIAL_MPSC_CONSOLE
/*
 * Console write: synchronously transmit 'count' bytes, expanding '\n' to
 * "\r\n".  First drains any tx already queued by the interrupt-driven
 * path, then feeds the data out one tx ring buffer at a time, busy-waiting
 * for the SDMA to finish each descriptor so the ring indices stay in step.
 */
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	/* Reclaim everything the normal tx path has in flight. */
	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		/* Fill one ring buffer, inserting '\r' after each '\n'. */
		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		/* No completion interrupt (arg 0): we poll for completion. */
		mpsc_setup_tx_desc(pi, i, 0);
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}
1769 
mpsc_console_setup(struct console * co,char * options)1770 static int __init mpsc_console_setup(struct console *co, char *options)
1771 {
1772 	struct mpsc_port_info *pi;
1773 	int baud, bits, parity, flow;
1774 
1775 	pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1776 
1777 	if (co->index >= MPSC_NUM_CTLRS)
1778 		co->index = 0;
1779 
1780 	pi = &mpsc_ports[co->index];
1781 
1782 	baud = pi->default_baud;
1783 	bits = pi->default_bits;
1784 	parity = pi->default_parity;
1785 	flow = pi->default_flow;
1786 
1787 	if (!pi->port.ops)
1788 		return -ENODEV;
1789 
1790 	spin_lock_init(&pi->port.lock);	/* Temporary fix--copied from 8250.c */
1791 
1792 	if (options)
1793 		uart_parse_options(options, &baud, &parity, &bits, &flow);
1794 
1795 	return uart_set_options(&pi->port, co, baud, parity, bits, flow);
1796 }
1797 
/* Console descriptor; index -1 means "any port" until setup picks one. */
static struct console mpsc_console = {
	.name	= MPSC_DEV_NAME,
	.write	= mpsc_console_write,
	.device	= uart_console_device,
	.setup	= mpsc_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mpsc_reg,
};
1807 
mpsc_late_console_init(void)1808 static int __init mpsc_late_console_init(void)
1809 {
1810 	pr_debug("mpsc_late_console_init: Enter\n");
1811 
1812 	if (!(mpsc_console.flags & CON_ENABLED))
1813 		register_console(&mpsc_console);
1814 	return 0;
1815 }
1816 
late_initcall(mpsc_late_console_init);

/* Hooked into mpsc_reg.cons; NULL when console support is compiled out. */
#define MPSC_CONSOLE	&mpsc_console
#else
#define MPSC_CONSOLE	NULL
#endif
1823 /*
1824  ******************************************************************************
1825  *
1826  * Dummy Platform Driver to extract & map shared register regions
1827  *
1828  ******************************************************************************
1829  */
mpsc_resource_err(char * s)1830 static void mpsc_resource_err(char *s)
1831 {
1832 	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
1833 }
1834 
/*
 * Claim and ioremap the two register regions shared by all MPSC ports:
 * the MPSC routing block and the SDMA interrupt block.  On failure of the
 * second region, the first is unwound.  Returns 0 or -ENOMEM.
 */
static int mpsc_shared_map_regs(struct platform_device *pd)
{
	struct resource	*r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_ROUTING_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE,
				"mpsc_routing_regs")) {
		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_shared_regs.mpsc_routing_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC routing base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_INTR_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE,
				"sdma_intr_regs")) {
		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
			MPSC_SDMA_INTR_REG_BLOCK_SIZE);
		mpsc_shared_regs.sdma_intr_base_p = r->start;
	} else {
		/* Unwind the routing-region mapping claimed above. */
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_resource_err("SDMA intr base");
		return -ENOMEM;
	}

	return 0;
}
1870 
mpsc_shared_unmap_regs(void)1871 static void mpsc_shared_unmap_regs(void)
1872 {
1873 	if (!mpsc_shared_regs.mpsc_routing_base) {
1874 		iounmap(mpsc_shared_regs.mpsc_routing_base);
1875 		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1876 				MPSC_ROUTING_REG_BLOCK_SIZE);
1877 	}
1878 	if (!mpsc_shared_regs.sdma_intr_base) {
1879 		iounmap(mpsc_shared_regs.sdma_intr_base);
1880 		release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
1881 				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1882 	}
1883 
1884 	mpsc_shared_regs.mpsc_routing_base = NULL;
1885 	mpsc_shared_regs.sdma_intr_base = NULL;
1886 
1887 	mpsc_shared_regs.mpsc_routing_base_p = 0;
1888 	mpsc_shared_regs.sdma_intr_base_p = 0;
1889 }
1890 
/*
 * Probe for the shared (id 0) platform device: map the shared register
 * regions and seed the register mirrors from platform data.  Any other
 * device id is rejected with -ENODEV.
 *
 * NOTE(review): dev_get_platdata() is assumed non-NULL here; the platform
 * registration presumably always supplies mpsc_shared_pdata — confirm.
 */
static int mpsc_shared_drv_probe(struct platform_device *dev)
{
	struct mpsc_shared_pdata	*pdata;
	int				 rc = -ENODEV;

	if (dev->id == 0) {
		rc = mpsc_shared_map_regs(dev);
		if (!rc) {
			pdata = (struct mpsc_shared_pdata *)
				dev_get_platdata(&dev->dev);

			mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
			mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
			mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
			mpsc_shared_regs.SDMA_INTR_CAUSE_m =
				pdata->intr_cause_val;
			mpsc_shared_regs.SDMA_INTR_MASK_m =
				pdata->intr_mask_val;

			rc = 0;
		}
	}

	return rc;
}
1916 
mpsc_shared_drv_remove(struct platform_device * dev)1917 static int mpsc_shared_drv_remove(struct platform_device *dev)
1918 {
1919 	int	rc = -ENODEV;
1920 
1921 	if (dev->id == 0) {
1922 		mpsc_shared_unmap_regs();
1923 		mpsc_shared_regs.MPSC_MRR_m = 0;
1924 		mpsc_shared_regs.MPSC_RCRR_m = 0;
1925 		mpsc_shared_regs.MPSC_TCRR_m = 0;
1926 		mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
1927 		mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
1928 		rc = 0;
1929 	}
1930 
1931 	return rc;
1932 }
1933 
/* Platform driver for the register regions shared across MPSC ports. */
static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name	= MPSC_SHARED_NAME,
	},
};
1941 
1942 /*
1943  ******************************************************************************
1944  *
1945  * Driver Interface Routines
1946  *
1947  ******************************************************************************
1948  */
/* uart_driver registration record for all MPSC controllers. */
static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,
};
1958 
mpsc_drv_map_regs(struct mpsc_port_info * pi,struct platform_device * pd)1959 static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
1960 		struct platform_device *pd)
1961 {
1962 	struct resource	*r;
1963 
1964 	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
1965 			&& request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
1966 			"mpsc_regs")) {
1967 		pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1968 		pi->mpsc_base_p = r->start;
1969 	} else {
1970 		mpsc_resource_err("MPSC base");
1971 		goto err;
1972 	}
1973 
1974 	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1975 					MPSC_SDMA_BASE_ORDER))
1976 			&& request_mem_region(r->start,
1977 				MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1978 		pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1979 		pi->sdma_base_p = r->start;
1980 	} else {
1981 		mpsc_resource_err("SDMA base");
1982 		if (pi->mpsc_base) {
1983 			iounmap(pi->mpsc_base);
1984 			pi->mpsc_base = NULL;
1985 		}
1986 		goto err;
1987 	}
1988 
1989 	if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
1990 			&& request_mem_region(r->start,
1991 				MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
1992 		pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1993 		pi->brg_base_p = r->start;
1994 	} else {
1995 		mpsc_resource_err("BRG base");
1996 		if (pi->mpsc_base) {
1997 			iounmap(pi->mpsc_base);
1998 			pi->mpsc_base = NULL;
1999 		}
2000 		if (pi->sdma_base) {
2001 			iounmap(pi->sdma_base);
2002 			pi->sdma_base = NULL;
2003 		}
2004 		goto err;
2005 	}
2006 	return 0;
2007 
2008 err:
2009 	return -ENOMEM;
2010 }
2011 
mpsc_drv_unmap_regs(struct mpsc_port_info * pi)2012 static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
2013 {
2014 	if (!pi->mpsc_base) {
2015 		iounmap(pi->mpsc_base);
2016 		release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
2017 	}
2018 	if (!pi->sdma_base) {
2019 		iounmap(pi->sdma_base);
2020 		release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
2021 	}
2022 	if (!pi->brg_base) {
2023 		iounmap(pi->brg_base);
2024 		release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
2025 	}
2026 
2027 	pi->mpsc_base = NULL;
2028 	pi->sdma_base = NULL;
2029 	pi->brg_base = NULL;
2030 
2031 	pi->mpsc_base_p = 0;
2032 	pi->sdma_base_p = 0;
2033 	pi->brg_base_p = 0;
2034 }
2035 
mpsc_drv_get_platform_data(struct mpsc_port_info * pi,struct platform_device * pd,int num)2036 static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
2037 		struct platform_device *pd, int num)
2038 {
2039 	struct mpsc_pdata	*pdata;
2040 
2041 	pdata = dev_get_platdata(&pd->dev);
2042 
2043 	pi->port.uartclk = pdata->brg_clk_freq;
2044 	pi->port.iotype = UPIO_MEM;
2045 	pi->port.line = num;
2046 	pi->port.type = PORT_MPSC;
2047 	pi->port.fifosize = MPSC_TXBE_SIZE;
2048 	pi->port.membase = pi->mpsc_base;
2049 	pi->port.mapbase = (ulong)pi->mpsc_base;
2050 	pi->port.ops = &mpsc_pops;
2051 
2052 	pi->mirror_regs = pdata->mirror_regs;
2053 	pi->cache_mgmt = pdata->cache_mgmt;
2054 	pi->brg_can_tune = pdata->brg_can_tune;
2055 	pi->brg_clk_src = pdata->brg_clk_src;
2056 	pi->mpsc_max_idle = pdata->max_idle;
2057 	pi->default_baud = pdata->default_baud;
2058 	pi->default_bits = pdata->default_bits;
2059 	pi->default_parity = pdata->default_parity;
2060 	pi->default_flow = pdata->default_flow;
2061 
2062 	/* Initial values of mirrored regs */
2063 	pi->MPSC_CHR_1_m = pdata->chr_1_val;
2064 	pi->MPSC_CHR_2_m = pdata->chr_2_val;
2065 	pi->MPSC_CHR_10_m = pdata->chr_10_val;
2066 	pi->MPSC_MPCR_m = pdata->mpcr_val;
2067 	pi->BRG_BCR_m = pdata->bcr_val;
2068 
2069 	pi->shared_regs = &mpsc_shared_regs;
2070 
2071 	pi->port.irq = platform_get_irq(pd, 0);
2072 }
2073 
mpsc_drv_probe(struct platform_device * dev)2074 static int mpsc_drv_probe(struct platform_device *dev)
2075 {
2076 	struct mpsc_port_info	*pi;
2077 	int			rc = -ENODEV;
2078 
2079 	pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
2080 
2081 	if (dev->id < MPSC_NUM_CTLRS) {
2082 		pi = &mpsc_ports[dev->id];
2083 
2084 		rc = mpsc_drv_map_regs(pi, dev);
2085 		if (!rc) {
2086 			mpsc_drv_get_platform_data(pi, dev, dev->id);
2087 			pi->port.dev = &dev->dev;
2088 
2089 			rc = mpsc_make_ready(pi);
2090 			if (!rc) {
2091 				spin_lock_init(&pi->tx_lock);
2092 				rc = uart_add_one_port(&mpsc_reg, &pi->port);
2093 				if (!rc) {
2094 					rc = 0;
2095 				} else {
2096 					mpsc_release_port((struct uart_port *)
2097 							pi);
2098 					mpsc_drv_unmap_regs(pi);
2099 				}
2100 			} else {
2101 				mpsc_drv_unmap_regs(pi);
2102 			}
2103 		}
2104 	}
2105 
2106 	return rc;
2107 }
2108 
/*
 * Per-controller platform driver.  There is deliberately no .remove:
 * suppress_bind_attrs blocks sysfs unbind, and the driver is registered
 * from an initcall below and never unregistered at runtime.
 */
static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.driver	= {
		.name			= MPSC_CTLR_NAME,
		.suppress_bind_attrs	= true,
	},
};
2116 
mpsc_drv_init(void)2117 static int __init mpsc_drv_init(void)
2118 {
2119 	int	rc;
2120 
2121 	printk(KERN_INFO "Serial: MPSC driver\n");
2122 
2123 	memset(mpsc_ports, 0, sizeof(mpsc_ports));
2124 	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2125 
2126 	rc = uart_register_driver(&mpsc_reg);
2127 	if (!rc) {
2128 		rc = platform_driver_register(&mpsc_shared_driver);
2129 		if (!rc) {
2130 			rc = platform_driver_register(&mpsc_driver);
2131 			if (rc) {
2132 				platform_driver_unregister(&mpsc_shared_driver);
2133 				uart_unregister_driver(&mpsc_reg);
2134 			}
2135 		} else {
2136 			uart_unregister_driver(&mpsc_reg);
2137 		}
2138 	}
2139 
2140 	return rc;
2141 }
2142 device_initcall(mpsc_drv_init);
2143 
2144 /*
2145 MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2146 MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
2147 MODULE_LICENSE("GPL");
2148 */
2149